diff --git a/CHANGELOG.md b/CHANGELOG.md
index fd9ea42b..048eb51d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,10 @@
All notable changes to this project will be documented in this file.
This project adheres to [Semantic Versioning](http://semver.org/).
+## [3.1.0] - 2024-04-24
+### Added
+- Added support for Ocean GKE APIs.
+
## [3.0.0] - 2024-04-19
### Fixed
- Removed support for Azure V2 Scaleset Elastigroup (Deprecated).
diff --git a/docs/clients/ocean/ocean_gcp_client.md b/docs/clients/ocean/ocean_gcp_client.md
new file mode 100644
index 00000000..897feb27
--- /dev/null
+++ b/docs/clients/ocean/ocean_gcp_client.md
@@ -0,0 +1,445 @@
+
+OceanGcpClient
+
+```python
+OceanGcpClient(self,
+ session=None,
+ print_output=True,
+ log_level=None,
+ user_agent=None,
+ timeout=None)
+```
+
+get_heartbeat_status
+
+```python
+OceanGcpClient.get_heartbeat_status(ocean_id: str)
+```
+
+Get the heartbeat status of the Ocean Controller for the cluster.
+The response returns the heartbeat status and the last heartbeat timestamp.
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+
+__Returns__
+
+`(Object)`: Ocean Get Heartbeat response
+
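+A minimal usage sketch (assuming Spot credentials are already configured for `SpotinstSession`; the `ocean_id` value is a placeholder):
+
+```python
+from spotinst_sdk2 import SpotinstSession
+
+session = SpotinstSession()            # credentials resolved by the session
+client = session.client("ocean_gcp")   # returns an OceanGcpClient
+
+# "o-12345abc" is a placeholder Ocean cluster ID
+heartbeat = client.get_heartbeat_status(ocean_id="o-12345abc")
+print(heartbeat)
+```
+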
+create_ocean_cluster
+
+```python
+OceanGcpClient.create_ocean_cluster(ocean: Ocean)
+```
+
+Create an Ocean Cluster
+
+__Arguments__
+
+- __ocean (Ocean)__: Ocean Object
+
+__Returns__
+
+`(Object)`: Ocean API response
+
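+A hedged sketch of building an `Ocean` object and creating a cluster, continuing with the `client` created above. All names and IDs are placeholders, and a real request usually also sets `compute` and `strategy` options from `spotinst_sdk2.models.ocean.gcp`:
+
+```python
+import spotinst_sdk2.models.ocean.gcp as gcp_ocean
+
+ocean = gcp_ocean.Ocean(
+    name="ocean-gke-docs-example",                 # placeholder cluster name
+    controller_cluster_id="example-controller-id", # placeholder controller ID
+    gke=gcp_ocean.GKE(cluster_name="example-gke-cluster",
+                      master_location="us-central1-a"),
+    capacity=gcp_ocean.Capacity(minimum=0, maximum=10, target=2),
+)
+
+created = client.create_ocean_cluster(ocean=ocean)
+```
+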
+get_all_ocean_clusters
+
+```python
+OceanGcpClient.get_all_ocean_clusters()
+```
+
+List the configurations for all Ocean clusters in the specified account.
+
+__Returns__
+
+`(Object)`: Ocean API response
+
+delete_ocean_cluster
+
+```python
+OceanGcpClient.delete_ocean_cluster(ocean_id: str)
+```
+
+Delete a specified Ocean cluster.
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+
+__Returns__
+
+`(Object)`: Ocean API response
+
+get_ocean_cluster
+
+```python
+OceanGcpClient.get_ocean_cluster(ocean_id: str)
+```
+
+Get the configuration for a specified Ocean cluster.
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+
+__Returns__
+
+`(Object)`: Ocean API response
+
+update_ocean_cluster
+
+```python
+OceanGcpClient.update_ocean_cluster(ocean_id: str, ocean: Ocean)
+```
+
+Update an existing Ocean Cluster
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+- __ocean (Ocean)__: Ocean object
+
+__Returns__
+
+`(Object)`: Ocean API response
+
+reimport_ocean_cluster
+
+```python
+OceanGcpClient.reimport_ocean_cluster(ocean_id: str)
+```
+
+Reimport the cluster's configuration from GKE.
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+
+__Returns__
+
+`(Object)`: Reimport cluster response
+
+get_elastilog
+
+```python
+OceanGcpClient.get_elastilog(ocean_id: str,
+ from_date: str,
+ to_date: str,
+ severity: str = None,
+ resource_id: str = None,
+ limit: int = None)
+```
+
+Get the cluster's Elastilog for a given time interval.
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+- __from_date (String)__: beginning date value
+- __to_date (String)__: end date value
+- __severity(String) (Optional)__: Log level severity
+- __resource_id(String) (Optional)__: specific resource identifier
+- __limit(int) (Optional)__: Maximum number of lines to extract in a response
+
+__Returns__
+
+`(Object)`: Ocean Get Log API response
+
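+A short sketch of fetching logs for a time window, continuing with the `client` from above (IDs and dates are placeholders; use the date format your Spot API account expects):
+
+```python
+logs = client.get_elastilog(
+    ocean_id="o-12345abc",     # placeholder Ocean cluster ID
+    from_date="2024-04-01",    # placeholder start date
+    to_date="2024-04-02",      # placeholder end date
+    severity="ERROR",          # optional
+    limit=100,                 # optional
+)
+```
+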
+get_rightsizing_recommendations
+
+```python
+OceanGcpClient.get_rightsizing_recommendations(
+ ocean_id: str, filter: RightSizingRecommendationFilter = None)
+```
+
+Get right-sizing recommendations for an Ocean cluster and filter them according to namespace or label.
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+- __filter (RightSizingRecommendationFilter)__: Optional - may be null.
+
+__Returns__
+
+`(Object)`: Ocean API response
+
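+A sketch of requesting recommendations filtered by namespace, continuing with the `client` from above (the namespaces are placeholders; the filter can also carry an `Attribute` from the models module):
+
+```python
+import spotinst_sdk2.models.ocean.gcp as gcp_ocean
+
+recommendation_filter = gcp_ocean.RightSizingRecommendationFilter(
+    namespaces=["default", "kube-system"])   # placeholder namespaces
+
+recommendations = client.get_rightsizing_recommendations(
+    ocean_id="o-12345abc", filter=recommendation_filter)
+```
+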
+get_aggregated_cluster_costs
+
+```python
+OceanGcpClient.get_aggregated_cluster_costs(
+ ocean_id: str, aggregated_cluster_costs: AggregatedClusterCosts)
+```
+
+Get aggregated cluster costs
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+- __aggregated_cluster_costs (AggregatedClusterCosts)__: Aggregated Cluster Costs request
+
+__Returns__
+
+`(Object)`: Aggregated Cluster Costs API response
+
+get_aggregated_summary_costs
+
+```python
+OceanGcpClient.get_aggregated_summary_costs(
+ ocean_id: str, aggregated_cluster_costs: AggregatedClusterCosts)
+```
+
+Get an aggregated summary of cluster costs.
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+- __aggregated_cluster_costs (AggregatedClusterCosts)__: Aggregated Cluster Costs request
+
+__Returns__
+
+`(Object)`: Aggregated Cluster Costs API response
+
+create_virtual_node_group
+
+```python
+OceanGcpClient.create_virtual_node_group(vng: VirtualNodeGroup)
+```
+
+Create a virtual node group.
+
+__Arguments__
+
+- __vng (VirtualNodeGroup)__: VirtualNodeGroup Object
+
+__Returns__
+
+`(Object)`: Ocean Launch Spec response
+
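+A sketch of creating a virtual node group with a few common fields, continuing with the `client` from above (all values are placeholders):
+
+```python
+import spotinst_sdk2.models.ocean.gcp as gcp_ocean
+
+vng = gcp_ocean.VirtualNodeGroup(
+    ocean_id="o-12345abc",                     # placeholder Ocean cluster ID
+    name="gpu-workloads",                      # placeholder VNG name
+    instance_types=["n1-standard-4"],
+    labels=[gcp_ocean.Labels(key="workload", value="gpu")],
+)
+
+launch_spec = client.create_virtual_node_group(vng=vng)
+```
+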
+get_all_virtual_node_groups
+
+```python
+OceanGcpClient.get_all_virtual_node_groups(ocean_id: str)
+```
+
+List the configurations for all virtual node groups in the specified Ocean cluster.
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+
+__Returns__
+
+`(Object)`: Ocean VNG API response
+
+import_gke_nodepool_to_vng_configuration
+
+```python
+OceanGcpClient.import_gke_nodepool_to_vng_configuration(
+ node_pool_name: str, ocean_id: str)
+```
+
+Import GKE Nodepool configurations and generate valid Ocean Virtual Node Group (VNG) configuration
+which can be used to create VNGs.
+
+__Arguments__
+
+- __node_pool_name (String)__: Name of the GKE node pool to import
+- __ocean_id (String)__: ID of the Ocean Cluster
+
+__Returns__
+
+`(Object)`: Ocean API response
+
+delete_virtual_node_group
+
+```python
+OceanGcpClient.delete_virtual_node_group(vng_id: str,
+ delete_nodes: bool = None)
+```
+
+Delete a Virtual Node Group (launch specification) from an Ocean cluster.
+
+__Arguments__
+
+- __vng_id (String)__: ID of the Ocean VNG
+- __delete_nodes (Bool)__: When set to "true", all instances belonging to the deleted launch specification will be drained, detached, and terminated.
+
+__Returns__
+
+`(Object)`: Ocean Launch Specification Delete response
+
+update_virtual_node_group
+
+```python
+OceanGcpClient.update_virtual_node_group(vng_id: str,
+ vng: VirtualNodeGroup)
+```
+
+Update an existing VNG inside an Ocean Cluster
+
+__Arguments__
+
+- __vng_id (String)__: ID of the Ocean Virtual Node Group
+- __vng (VirtualNodeGroup)__: VirtualNodeGroup object
+
+__Returns__
+
+`(Object)`: Ocean Launch Spec response
+
+get_virtual_node_group
+
+```python
+OceanGcpClient.get_virtual_node_group(ocean_launch_spec_id: str)
+```
+
+Get a Virtual Node Group of the cluster.
+
+__Arguments__
+
+- __ocean_launch_spec_id (String)__: Ocean cluster launch specification identifier
+
+__Returns__
+
+`(Object)`: Ocean Launch Spec response
+
+initiate_roll
+
+```python
+OceanGcpClient.initiate_roll(ocean_id: str, cluster_roll: Roll)
+```
+
+Initiate Cluster Rolls
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+- __cluster_roll (Roll)__: Cluster Roll / Roll with Instance Ids/ Launch specification Ids
+
+__Returns__
+
+`(Object)`: Cluster Roll API response
+
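+A sketch of starting a roll in 20% batches, continuing with the `client` from above (values are placeholders):
+
+```python
+import spotinst_sdk2.models.ocean.gcp as gcp_ocean
+
+roll = gcp_ocean.Roll(batch_size_percentage=20,
+                      comment="roll after configuration update")
+
+roll_status = client.initiate_roll(ocean_id="o-12345abc", cluster_roll=roll)
+```
+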
+list_rolls
+
+```python
+OceanGcpClient.list_rolls(ocean_id: str)
+```
+
+Get status for all rolls of an Ocean cluster.
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+
+__Returns__
+
+`(Object)`: List of Cluster Roll API response
+
+update_roll
+
+```python
+OceanGcpClient.update_roll(ocean_id: str, roll_id: str, status: str)
+```
+
+Update a roll of an Ocean cluster.
+Performing the request will stop the next batch in a roll.
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+- __roll_id (String)__: Ocean cluster roll identifier
+- __status (String)__: Roll status to set, e.g. "STOPPED" to stop the roll
+
+__Returns__
+
+`(Object)`: Cluster Roll API response
+
+get_roll
+
+```python
+OceanGcpClient.get_roll(ocean_id: str, roll_id: str)
+```
+
+Get status for a roll of an Ocean cluster.
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+- __roll_id (String)__: Ocean cluster roll identifier
+
+__Returns__
+
+`(Object)`: Cluster Roll API response
+
+get_cluster_nodes
+
+```python
+OceanGcpClient.get_cluster_nodes(ocean_id: str,
+ instance_name: str = None,
+ launch_spec_id: str = None)
+```
+
+Get nodes data of an Ocean cluster.
+
+__Arguments__
+
+- __ocean_id (String)__: ID of the Ocean Cluster
+- __instance_name (String)__: Get a specific node by instance name
+- __launch_spec_id (String)__: Ocean cluster launch specification identifier.
+
+__Returns__
+
+`(Object)`: Ocean Kubernetes Nodes Data response
+
+update_elastigroup_to_ocean
+
+```python
+OceanGcpClient.update_elastigroup_to_ocean(group_id: str)
+```
+
+Upgrade an Elastigroup with Kubernetes integration to Ocean for Kubernetes cluster.
+
+__Arguments__
+
+- __group_id (str)__: Elastigroup identifier
+
+__Returns__
+
+`(Object)`: Ocean API response
+
+import_gke_cluster_to_ocean
+
+```python
+OceanGcpClient.import_gke_cluster_to_ocean(
+ cluster_name: str,
+ location: str,
+ import_gke_to_ocean: ImportGkeClusterToOcean,
+ include_launchSpecs: bool = None,
+ node_pool_name: str = None)
+```
+
+Create an Ocean configuration according to a GKE Cluster configuration.
+
+__Arguments__
+
+- __cluster_name (String)__: Name of the GKE Cluster.
+- __include_launchSpecs (Bool)__: When set to "true", GKE cluster node pools will be imported to Ocean custom VNG ("customLaunchSpec") configurations.
+- __location (String)__: Location GKE Cluster Master.
+- __node_pool_name (String)__: Name of the Node Pool to use as a default for the Cluster configuration.
+- __import_gke_to_ocean (ImportGkeClusterToOcean)__: ImportGkeClusterToOcean Object
+
+__Returns__
+
+`(Object)`: Ocean GKE Cluster Import Response
+
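+A sketch of importing an existing GKE cluster, continuing with the `client` from above (cluster name, location, and capacity are placeholders):
+
+```python
+import spotinst_sdk2.models.ocean.gcp as gcp_ocean
+
+import_config = gcp_ocean.ImportGkeClusterToOcean(
+    name="imported-ocean-cluster",            # placeholder Ocean cluster name
+    capacity=gcp_ocean.Capacity(minimum=0, maximum=20, target=3),
+)
+
+imported = client.import_gke_cluster_to_ocean(
+    cluster_name="example-gke-cluster",       # placeholder existing GKE cluster
+    location="us-central1-a",                 # placeholder GKE master location
+    import_gke_to_ocean=import_config,
+    include_launchSpecs=True,
+)
+```
+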
+launch_nodes_in_vng
+
+```python
+OceanGcpClient.launch_nodes_in_vng(ocean_launch_spec_id: str,
+ amount: int)
+```
+
+Launch nodes in Virtual Node Group.
+
+__Arguments__
+
+- __ocean_launch_spec_id (String)__: Ocean cluster launch specification identifier.
+- __amount (int)__: The number of nodes to launch.
+
+__Returns__
+
+`(Object)`: Ocean Virtual Node Group Launch API response
+
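+A sketch of launching three nodes in a virtual node group, continuing with the `client` from above (the launch spec ID is a placeholder):
+
+```python
+launch_response = client.launch_nodes_in_vng(
+    ocean_launch_spec_id="ols-12345abc",      # placeholder launch spec ID
+    amount=3,
+)
+```
+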
diff --git a/docs/models/ocean/gcp.md b/docs/models/ocean/gcp.md
new file mode 100644
index 00000000..24e30eca
--- /dev/null
+++ b/docs/models/ocean/gcp.md
@@ -0,0 +1,856 @@
+spotinst_sdk2.models.ocean.gcp
+
+
+Down
+
+```python
+Down(
+ self,
+ evaluation_periods: int = 'd3043820717d74d9a17694c176d39733',
+ max_scale_down_percentage: int = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __evaluation_periods__: int
+- __max_scale_down_percentage__: int
+
+Headroom
+
+```python
+Headroom(self,
+ cpu_per_unit: int = 'd3043820717d74d9a17694c176d39733',
+ gpu_per_unit: int = 'd3043820717d74d9a17694c176d39733',
+ memory_per_unit: int = 'd3043820717d74d9a17694c176d39733',
+ num_of_units: int = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __cpu_per_unit__: int
+- __gpu_per_unit__: int
+- __memory_per_unit__: int
+- __num_of_units__: int
+
+ResourceLimits
+
+```python
+ResourceLimits(self,
+ max_memory_gib: int = 'd3043820717d74d9a17694c176d39733',
+ max_v_cpu: int = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __max_memory_gib__: int
+- __max_v_cpu__: int
+
+AutoScaler
+
+```python
+AutoScaler(
+ self,
+ auto_headroom_percentage: int = 'd3043820717d74d9a17694c176d39733',
+ cooldown: int = 'd3043820717d74d9a17694c176d39733',
+ down: Down = 'd3043820717d74d9a17694c176d39733',
+ enable_automatic_and_manual_headroom:
+ bool = 'd3043820717d74d9a17694c176d39733',
+ headroom: Headroom = 'd3043820717d74d9a17694c176d39733',
+ is_auto_config: bool = 'd3043820717d74d9a17694c176d39733',
+ is_enabled: bool = 'd3043820717d74d9a17694c176d39733',
+ resource_limits: ResourceLimits = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __auto_headroom_percentage__: int
+- __cooldown__: int
+- __down__: Down
+- __enable_automatic_and_manual_headroom__: bool
+- __headroom__: Headroom
+- __is_auto_config__: bool
+- __is_enabled__: bool
+- __resource_limits__: ResourceLimits
+
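+A sketch of composing an `AutoScaler` from the nested models above (the numeric values are placeholders, not recommended settings):
+
+```python
+import spotinst_sdk2.models.ocean.gcp as gcp_ocean
+
+auto_scaler = gcp_ocean.AutoScaler(
+    is_enabled=True,
+    is_auto_config=False,
+    cooldown=300,
+    headroom=gcp_ocean.Headroom(cpu_per_unit=500, memory_per_unit=512, num_of_units=2),
+    down=gcp_ocean.Down(evaluation_periods=3, max_scale_down_percentage=30),
+    resource_limits=gcp_ocean.ResourceLimits(max_v_cpu=1000, max_memory_gib=2000),
+)
+```
+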
+Capacity
+
+```python
+Capacity(self,
+ maximum: int = 'd3043820717d74d9a17694c176d39733',
+ minimum: int = 'd3043820717d74d9a17694c176d39733',
+ target: int = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __maximum__: int
+- __minimum__: int
+- __target__: int
+
+LocationType
+
+```python
+LocationType(cls, value, names=None, *, module, qualname, type, start)
+```
+An enumeration.
+regional
+
+
+NamedPorts
+
+```python
+NamedPorts(self,
+ name: str = 'd3043820717d74d9a17694c176d39733',
+ ports: typing.List[int] = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __name__: str
+- __ports__: List[int]
+
+Scheme
+
+```python
+Scheme(cls, value, names=None, *, module, qualname, type, start)
+```
+An enumeration.
+external
+
+
+internal
+
+
+BackendServices
+
+```python
+BackendServices(
+ self,
+ backend_service_name: str = 'd3043820717d74d9a17694c176d39733',
+ location_type: LocationType = 'd3043820717d74d9a17694c176d39733',
+ named_ports: NamedPorts = 'd3043820717d74d9a17694c176d39733',
+ scheme: Scheme = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __backend_service_name__: str
+- __location_type__: LocationType
+- __named_ports__: NamedPorts
+- __scheme__: Scheme
+
+InstanceTypes
+
+```python
+InstanceTypes(
+ self,
+ blacklist: typing.List[str] = 'd3043820717d74d9a17694c176d39733',
+ whitelist: typing.List[str] = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __blacklist__: List[str]
+- __whitelist__: List[str]
+
+Labels
+
+```python
+Labels(self,
+ key: str = 'd3043820717d74d9a17694c176d39733',
+ value: str = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __key__: str
+- __value__: str
+
+
+Metadata
+
+```python
+Metadata(self,
+ key: str = 'd3043820717d74d9a17694c176d39733',
+ value: str = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __key__: str
+- __value__: str
+
+RootVolumeType
+
+```python
+RootVolumeType(cls, value, names=None, *, module, qualname, type, start)
+```
+An enumeration.
+pd_ssd
+
+
+pd_standard
+
+
+ShieldedInstanceConfig
+
+```python
+ShieldedInstanceConfig(
+ self,
+ enable_integrity_monitoring: bool = 'd3043820717d74d9a17694c176d39733',
+ enable_secure_boot: bool = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __enable_integrity_monitoring__: bool
+- __enable_secure_boot__: bool
+
+LaunchSpecification
+
+```python
+LaunchSpecification(
+ self,
+ ip_forwarding: bool = 'd3043820717d74d9a17694c176d39733',
+ labels:
+ typing.List[spotinst_sdk2.models.ocean.gcp.Labels] = 'd3043820717d74d9a17694c176d39733',
+ metadata:
+ typing.List[spotinst_sdk2.models.ocean.gcp.Metadata] = 'd3043820717d74d9a17694c176d39733',
+ min_cpu_platform: str = 'd3043820717d74d9a17694c176d39733',
+ root_volume_size_in_gb: int = 'd3043820717d74d9a17694c176d39733',
+ root_volume_type: RootVolumeType = 'd3043820717d74d9a17694c176d39733',
+ service_account: str = 'd3043820717d74d9a17694c176d39733',
+ shielded_instance_config:
+ ShieldedInstanceConfig = 'd3043820717d74d9a17694c176d39733',
+ source_image: str = 'd3043820717d74d9a17694c176d39733',
+ tags: typing.List[str] = 'd3043820717d74d9a17694c176d39733',
+ use_as_template_only: bool = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __ip_forwarding__: bool
+- __labels__: List[Labels]
+- __metadata__: List[Metadata]
+- __min_cpu_platform__: str
+- __root_volume_size_in_gb__: int
+- __root_volume_type__: RootVolumeType
+- __service_account__: str
+- __shielded_instance_config__: ShieldedInstanceConfig
+- __source_image__: str
+- __tags__: List[str]
+- __use_as_template_only__: bool
+
+AccessConfigs
+
+```python
+AccessConfigs(self,
+ name: str = 'd3043820717d74d9a17694c176d39733',
+ type: str = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __name__: str
+- __type__: str
+
+AliasIpRanges
+
+```python
+AliasIpRanges(
+ self,
+ ip_cidr_range: str = 'd3043820717d74d9a17694c176d39733',
+ subnetwork_range_name: str = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __ip_cidr_range__: str
+- __subnetwork_range_name__: str
+
+NetworkInterfaces
+
+```python
+NetworkInterfaces(
+ self,
+ access_configs:
+ typing.List[spotinst_sdk2.models.ocean.gcp.AccessConfigs] = 'd3043820717d74d9a17694c176d39733',
+ alias_ip_ranges:
+ typing.List[spotinst_sdk2.models.ocean.gcp.AliasIpRanges] = 'd3043820717d74d9a17694c176d39733',
+ network: str = 'd3043820717d74d9a17694c176d39733',
+ project_id: str = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __access_configs__: List[AccessConfigs]
+- __alias_ip_ranges__: List[AliasIpRanges]
+- __network__: str
+- __project_id__: str
+
+Compute
+
+```python
+Compute(
+ self,
+ availability_zones:
+ typing.List[str] = 'd3043820717d74d9a17694c176d39733',
+ backend_services: BackendServices = 'd3043820717d74d9a17694c176d39733',
+ instance_types: InstanceTypes = 'd3043820717d74d9a17694c176d39733',
+ launch_specification:
+ LaunchSpecification = 'd3043820717d74d9a17694c176d39733',
+ network_interfaces:
+ typing.List[spotinst_sdk2.models.ocean.gcp.NetworkInterfaces] = 'd3043820717d74d9a17694c176d39733',
+ subnet_name: str = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __availability_zones__: List[str]
+- __backend_services__: BackendServices
+- __instance_types__: InstanceTypes
+- __launch_specification__: LaunchSpecification
+- __network_interfaces__: List[NetworkInterfaces]
+- __subnet_name__: str
+
+GKE
+
+```python
+GKE(self,
+ cluster_name: str = 'd3043820717d74d9a17694c176d39733',
+ master_location: str = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __cluster_name__: str
+- __master_location__: str
+
+ShutdownHours
+
+```python
+ShutdownHours(
+ self,
+ is_enabled: bool = 'd3043820717d74d9a17694c176d39733',
+ time_windows: typing.List[str] = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __is_enabled__: bool
+- __time_windows__: List[str]
+
+ClusterRoll
+
+```python
+ClusterRoll(
+ self,
+ batch_min_healthy_percentage: int = 'd3043820717d74d9a17694c176d39733',
+ batch_size_percentage: int = 'd3043820717d74d9a17694c176d39733',
+ comment: str = 'd3043820717d74d9a17694c176d39733',
+ respect_pdb: bool = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __batch_min_healthy_percentage__: int
+- __batch_size_percentage__: int
+- __comment__: str
+- __respect_pdb__: bool
+
+Parameters
+
+```python
+Parameters(self,
+ cluster_roll: ClusterRoll = 'd3043820717d74d9a17694c176d39733'
+ )
+```
+
+__Arguments__
+
+- __cluster_roll__: ClusterRoll
+
+Tasks
+
+```python
+Tasks(self,
+ cron_expression: str = 'd3043820717d74d9a17694c176d39733',
+ is_enabled: bool = 'd3043820717d74d9a17694c176d39733',
+ parameters: Parameters = 'd3043820717d74d9a17694c176d39733',
+ task_type: str = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __cron_expression__: str
+- __is_enabled__: bool
+- __parameters__: Parameters
+- __task_type__: str
+
+Scheduling
+
+```python
+Scheduling(
+ self,
+ shutdown_hours: ShutdownHours = 'd3043820717d74d9a17694c176d39733',
+ tasks:
+ typing.List[spotinst_sdk2.models.ocean.gcp.Tasks] = 'd3043820717d74d9a17694c176d39733'
+)
+```
+
+__Arguments__
+
+- __shutdown_hours__: ShutdownHours
+- __tasks__: List[Tasks]
+
+ContainerImage
+
+```python
+ContainerImage(
+ self,
+ approved_images: typing.List[str] = 'd3043820717d74d9a17694c176d39733'
+)
+```
+
+__Arguments__
+
+- __approved_images__: List[str]
+
+Security
+
+```python
+Security(
+ self,
+ container_image: ContainerImage = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __container_image__: ContainerImage
+
+ProvisioningModel
+
+```python
+ProvisioningModel(cls,
+ value,
+ names=None,
+ *,
+ module,
+ qualname,
+ type,
+ start)
+```
+An enumeration.
+preemptible
+
+
+spot
+
+
+Strategy
+
+```python
+Strategy(
+ self,
+ draining_timeout: int = 'd3043820717d74d9a17694c176d39733',
+ preemptible_percentage: int = 'd3043820717d74d9a17694c176d39733',
+ provisioning_model: ProvisioningModel = 'd3043820717d74d9a17694c176d39733'
+)
+```
+
+__Arguments__
+
+- __draining_timeout__: int
+- __preemptible_percentage__: int
+- __provisioning_model__: ProvisioningModel
+
+Ocean
+
+```python
+Ocean(self,
+ auto_scaler: AutoScaler = 'd3043820717d74d9a17694c176d39733',
+ capacity: Capacity = 'd3043820717d74d9a17694c176d39733',
+ compute: Compute = 'd3043820717d74d9a17694c176d39733',
+ controller_cluster_id: str = 'd3043820717d74d9a17694c176d39733',
+ gke: GKE = 'd3043820717d74d9a17694c176d39733',
+ name: str = 'd3043820717d74d9a17694c176d39733',
+ scheduling: Scheduling = 'd3043820717d74d9a17694c176d39733',
+ security: Security = 'd3043820717d74d9a17694c176d39733',
+ strategy: Strategy = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __auto_scaler__: AutoScaler
+- __capacity__: Capacity
+- __compute__: Compute
+- __controller_cluster_id__: str
+- __gke__: GKE
+- __name__: str
+- __scheduling__: Scheduling
+- __security__: Security
+- __strategy__: Strategy
+
+Type
+
+```python
+Type(cls, value, names=None, *, module, qualname, type, start)
+```
+An enumeration.
+annotation
+
+
+label
+
+
+Operator
+
+```python
+Operator(cls, value, names=None, *, module, qualname, type, start)
+```
+An enumeration.
+does_not_exist
+
+
+equals
+
+
+exists
+
+
+not_equals
+
+
+Attribute
+
+```python
+Attribute(self,
+ type: Type = 'd3043820717d74d9a17694c176d39733',
+ key: str = 'd3043820717d74d9a17694c176d39733',
+ operator: Operator = 'd3043820717d74d9a17694c176d39733',
+ value: str = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __type__: Type
+- __key__: str
+- __operator__: Operator
+- __value__: str
+
+RightSizingRecommendationFilter
+
+```python
+RightSizingRecommendationFilter(
+ self,
+ namespaces: typing.List[str] = 'd3043820717d74d9a17694c176d39733',
+ attribute: Attribute = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __namespaces__: List[str]
+- __attribute__: Attribute
+
+AllMatch
+
+```python
+AllMatch(
+ self,
+ all_match:
+ typing.List[spotinst_sdk2.models.ocean.gcp.Attribute] = 'd3043820717d74d9a17694c176d39733'
+)
+```
+
+__Arguments__
+
+- __all_match__: List[Attribute]
+
+Conditions
+
+```python
+Conditions(
+ self,
+ any_match:
+ typing.List[spotinst_sdk2.models.ocean.gcp.AllMatch] = 'd3043820717d74d9a17694c176d39733'
+)
+```
+
+__Arguments__
+
+- __any_match__: List[AllMatch]
+
+Scope
+
+```python
+Scope(cls, value, names=None, *, module, qualname, type, start)
+```
+An enumeration.
+namespace
+
+
+resource
+
+
+Filter
+
+```python
+Filter(self,
+ conditions: Conditions = 'd3043820717d74d9a17694c176d39733',
+ scope: Scope = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __conditions__: Conditions
+- __scope__: Scope
+
+GroupBy
+
+```python
+GroupBy(cls, value, names=None, *, module, qualname, type, start)
+```
+An enumeration.
+namespace
+
+
+namespace_annotation
+
+
+namespace_label
+
+
+resource_annotation
+
+
+resource_label
+
+
+AggregatedClusterCosts
+
+```python
+AggregatedClusterCosts(
+ self,
+ end_time: str = 'd3043820717d74d9a17694c176d39733',
+ aggregated_filter: Filter = 'd3043820717d74d9a17694c176d39733',
+ group_by: GroupBy = 'namespace',
+ start_time: str = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __end_time__: str
+- __aggregated_filter__: Filter
+- __group_by__: GroupBy
+- __start_time__: str
+
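+A sketch of building a costs request and passing it to `OceanGcpClient.get_aggregated_cluster_costs` (assuming an `OceanGcpClient` instance named `client`; the timestamps are placeholders in whatever time format the Spot costs API expects):
+
+```python
+import spotinst_sdk2.models.ocean.gcp as gcp_ocean
+
+costs_request = gcp_ocean.AggregatedClusterCosts(
+    start_time="1713916800000",   # placeholder start of the window
+    end_time="1714003200000",     # placeholder end of the window
+)                                 # group_by defaults to "namespace"
+
+costs = client.get_aggregated_cluster_costs(
+    ocean_id="o-12345abc", aggregated_cluster_costs=costs_request)
+```
+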
+AutoScale
+
+```python
+AutoScale(
+ self,
+ auto_headroom_percentage: int = 'd3043820717d74d9a17694c176d39733',
+ headrooms:
+ typing.List[spotinst_sdk2.models.ocean.gcp.Headroom] = 'd3043820717d74d9a17694c176d39733',
+ down: Down = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __auto_headroom_percentage__: int
+- __headrooms__: List[Headroom]
+- __down__: Down
+
+VNGResourceLimits
+
+```python
+VNGResourceLimits(
+ self,
+ max_instance_count: int = 'd3043820717d74d9a17694c176d39733',
+ min_instance_count: int = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __max_instance_count__: int
+- __min_instance_count__: int
+
+Config
+
+```python
+Config(
+ self,
+ headrooms:
+ typing.List[spotinst_sdk2.models.ocean.gcp.Headroom] = 'd3043820717d74d9a17694c176d39733'
+)
+```
+
+__Arguments__
+
+- __headrooms__: List[Headroom]
+
+VNGTasks
+
+```python
+VNGTasks(self,
+ config: Config = 'd3043820717d74d9a17694c176d39733',
+ cron_expression: str = 'd3043820717d74d9a17694c176d39733',
+ is_enabled: bool = 'd3043820717d74d9a17694c176d39733',
+ task_type: str = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __config__: Config
+- __cron_expression__: str
+- __is_enabled__: bool
+- __task_type__: str
+
+VNGScheduling
+
+```python
+VNGScheduling(
+ self, tasks: typing.List[spotinst_sdk2.models.ocean.gcp.VNGTasks])
+```
+
+__Arguments__
+
+- __tasks__: List[VNGTasks]
+
+Storage
+
+```python
+Storage(self, local_ssd_count: int = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __local_ssd_count__: int
+
+VNGStrategy
+
+```python
+VNGStrategy(
+ self,
+ preemptible_percentage: int = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __preemptible_percentage__: int
+
+Taints
+
+```python
+Taints(self,
+ effect: str = 'd3043820717d74d9a17694c176d39733',
+ key: str = 'd3043820717d74d9a17694c176d39733',
+ value: str = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __effect__: str
+- __key__: str
+- __value__: str
+
+VirtualNodeGroup
+
+```python
+VirtualNodeGroup(
+ self,
+ auto_scale: AutoScale = 'd3043820717d74d9a17694c176d39733',
+ availability_zones: typing.List[str] = 'd3043820717d74d9a17694c176d39733',
+ instance_types: typing.List[str] = 'd3043820717d74d9a17694c176d39733',
+ labels:
+ typing.List[spotinst_sdk2.models.ocean.gcp.Labels] = 'd3043820717d74d9a17694c176d39733',
+ metadata:
+ typing.List[spotinst_sdk2.models.ocean.gcp.Metadata] = 'd3043820717d74d9a17694c176d39733',
+ name: str = 'd3043820717d74d9a17694c176d39733',
+ network_interfaces:
+ typing.List[spotinst_sdk2.models.ocean.gcp.NetworkInterfaces] = 'd3043820717d74d9a17694c176d39733',
+ ocean_id: str = 'd3043820717d74d9a17694c176d39733',
+ resource_limits: VNGResourceLimits = 'd3043820717d74d9a17694c176d39733',
+ restrict_scale_down: bool = 'd3043820717d74d9a17694c176d39733',
+ root_volume_size_in_gb: int = 'd3043820717d74d9a17694c176d39733',
+ root_volume_type: str = 'd3043820717d74d9a17694c176d39733',
+ scheduling: VNGScheduling = 'd3043820717d74d9a17694c176d39733',
+ service_account: str = 'd3043820717d74d9a17694c176d39733',
+ shielded_instance_config:
+ ShieldedInstanceConfig = 'd3043820717d74d9a17694c176d39733',
+ source_image: str = 'd3043820717d74d9a17694c176d39733',
+ storage: Storage = 'd3043820717d74d9a17694c176d39733',
+ strategy: VNGStrategy = 'd3043820717d74d9a17694c176d39733',
+ tags: typing.List[str] = 'd3043820717d74d9a17694c176d39733',
+ taints:
+ typing.List[spotinst_sdk2.models.ocean.gcp.Taints] = 'd3043820717d74d9a17694c176d39733'
+)
+```
+
+__Arguments__
+
+- __auto_scale__: AutoScale
+- __availability_zones__: List[str]
+- __instance_types__: List[str]
+- __labels__: List[Labels]
+- __metadata__: List[Metadata]
+- __name__: str
+- __network_interfaces__: List[NetworkInterfaces]
+- __ocean_id__: str
+- __resource_limits__: VNGResourceLimits
+- __restrict_scale_down__: bool
+- __root_volume_size_in_gb__: int
+- __root_volume_type__: str
+- __scheduling__: VNGScheduling
+- __service_account__: str
+- __shielded_instance_config__: ShieldedInstanceConfig
+- __source_image__: str
+- __storage__: Storage
+- __strategy__: VNGStrategy
+- __tags__: List[str]
+- __taints__: List[Taints]
+
+Roll
+
+```python
+Roll(
+ self,
+ batch_min_healthy_percentage: int = 'd3043820717d74d9a17694c176d39733',
+ batch_size_percentage: int = 'd3043820717d74d9a17694c176d39733',
+ comment: str = 'd3043820717d74d9a17694c176d39733',
+ launch_spec_ids: typing.List[str] = 'd3043820717d74d9a17694c176d39733',
+ instance_ids: typing.List[str] = 'd3043820717d74d9a17694c176d39733',
+ respect_pdb: bool = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __batch_min_healthy_percentage__: int
+- __batch_size_percentage__: int
+- __comment__: str
+- __instance_ids__: List[str]
+- __launch_spec_ids__: List[str]
+- __respect_pdb__: bool
+
+ImportGkeClusterToOcean
+
+```python
+ImportGkeClusterToOcean(
+ self,
+ auto_scaler: AutoScaler = 'd3043820717d74d9a17694c176d39733',
+ availability_zones:
+ typing.List[str] = 'd3043820717d74d9a17694c176d39733',
+ backend_services:
+ typing.List[spotinst_sdk2.models.ocean.gcp.BackendServices] = 'd3043820717d74d9a17694c176d39733',
+ capacity: Capacity = 'd3043820717d74d9a17694c176d39733',
+ controller_cluster_id: str = 'd3043820717d74d9a17694c176d39733',
+ instance_types: InstanceTypes = 'd3043820717d74d9a17694c176d39733',
+ name: str = 'd3043820717d74d9a17694c176d39733')
+```
+
+__Arguments__
+
+- __auto_scaler__: AutoScaler
+- __availability_zones__: List[str]
+- __backend_services__: List[BackendServices]
+- __capacity__: Capacity
+- __controller_cluster_id__: str
+- __instance_types__: InstanceTypes
+- __name__: str
+
diff --git a/pydocmd.yml b/pydocmd.yml
index 9fff6384..e4222d16 100755
--- a/pydocmd.yml
+++ b/pydocmd.yml
@@ -23,6 +23,8 @@ generate:
- spotinst_sdk2.clients.ocean.OceanAwsClient++
- clients/ocean/ocean_azure_client.md:
- spotinst_sdk2.clients.ocean.OceanAzureClient++
+ - clients/ocean/ocean_gcp_client.md:
+ - spotinst_sdk2.clients.ocean.OceanGcpClient++
- clients/ocean_cd/ocean_cd_client.md:
- spotinst_sdk2.clients.ocean_cd.OceanCDClient++
- clients/subscription/subscription_client.md:
@@ -53,6 +55,8 @@ generate:
- spotinst_sdk2.models.ocean.aws++
- models/ocean/azure.md:
- spotinst_sdk2.models.ocean.azure++
+ - models/ocean/gcp.md:
+ - spotinst_sdk2.models.ocean.gcp++
- models/ocean_cd.md:
- spotinst_sdk2.models.ocean_cd++
- spotinst_sdk2.models.ocean_cd.rollout_spec++
diff --git a/setup.py b/setup.py
index 78da46c7..edc9d383 100755
--- a/setup.py
+++ b/setup.py
@@ -81,6 +81,7 @@
"spotinst_sdk2.models.ocean",
"spotinst_sdk2.models.ocean.aws",
"spotinst_sdk2.models.ocean.azure",
+ "spotinst_sdk2.models.ocean.gcp",
"spotinst_sdk2.models.ocean_cd",
"spotinst_sdk2.models.ocean_cd",
"spotinst_sdk2.models.setup",
diff --git a/spotinst_sdk2/__init__.py b/spotinst_sdk2/__init__.py
index 156be7c1..2e16b7c1 100755
--- a/spotinst_sdk2/__init__.py
+++ b/spotinst_sdk2/__init__.py
@@ -54,7 +54,9 @@ def client(self, service, print_output=True, log_level=None, user_agent=None, ti
"ocean_aws": OceanAwsClient(session=self.session, print_output=print_output, log_level=log_level,
user_agent=user_agent, timeout=timeout),
"ocean_azure": OceanAzureClient(session=self.session, print_output=print_output, log_level=log_level,
- user_agent=user_agent, timeout=timeout),
+ user_agent=user_agent, timeout=timeout),
+ "ocean_gcp": OceanGcpClient(session=self.session, print_output=print_output, log_level=log_level,
+ user_agent=user_agent, timeout=timeout),
"oceancd": OceanCDClient(session=self.session, print_output=print_output, log_level=log_level,
user_agent=user_agent, timeout=timeout),
"managed_instance_aws": ManagedInstanceAwsClient(session=self.session, print_output=print_output,
diff --git a/spotinst_sdk2/clients/ocean/__init__.py b/spotinst_sdk2/clients/ocean/__init__.py
index 975d2eee..eef292dd 100644
--- a/spotinst_sdk2/clients/ocean/__init__.py
+++ b/spotinst_sdk2/clients/ocean/__init__.py
@@ -4,6 +4,7 @@
from spotinst_sdk2.client import Client
import spotinst_sdk2.models.ocean.aws as aws_ocean
import spotinst_sdk2.models.ocean.azure as azure_ocean
+import spotinst_sdk2.models.ocean.gcp as gcp_ocean
# region AWS
@@ -1693,3 +1694,662 @@ def list_migrations(self, ocean_id: str):
return formatted_response["response"]
# endregion
+
+
+class OceanGcpClient(Client):
+ __base_ocean_url = "/ocean/k8s/cluster/"
+ __base_ocean_cluster_url = "/ocean/gcp/k8s/cluster"
+ __base_ocean_launchspec_url = "/ocean/gcp/k8s/launchSpec"
+
+ def get_heartbeat_status(self, ocean_id: str):
+ """
+ Get the heartbeat status of the Ocean Controller for the cluster.
+ The response returns the heartbeat status and the last heartbeat timestamp.
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+
+ # Returns
+ (Object): Ocean Get Heartbeat response
+ """
+ response = self.send_get(
+ url=self.__base_ocean_url + ocean_id + "/controllerHeartbeat",
+ entity_name="ocean (Cluster Heartbeat)"
+ )
+
+ formatted_response = self.convert_json(
+ response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def create_ocean_cluster(self, ocean: gcp_ocean.Ocean):
+ """
+ Create an Ocean Cluster
+
+ # Arguments
+ ocean (Ocean): Ocean Object
+
+ # Returns
+ (Object): Ocean API response
+ """
+ ocean = gcp_ocean.OceanRequest(ocean)
+
+ excluded_missing_dict = self.exclude_missing(
+ json.loads(ocean.toJSON()))
+
+ formatted_missing_dict = self.convert_json(
+ excluded_missing_dict, self.underscore_to_camel)
+
+ body_json = json.dumps(formatted_missing_dict)
+
+ response = self.send_post(
+ body=body_json,
+ url=self.__base_ocean_cluster_url,
+ entity_name='ocean')
+
+ formatted_response = self.convert_json(response,
+ self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def get_all_ocean_clusters(self):
+ """
+ List the configurations for all Ocean clusters in the specified account.
+
+ # Returns
+ (Object): Ocean API response
+ """
+
+ response = self.send_get(
+ url=self.__base_ocean_cluster_url,
+ entity_name="ocean"
+ )
+
+ formatted_response = self.convert_json(
+ response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"]
+
+ def delete_ocean_cluster(self, ocean_id: str):
+ """
+ Delete a specified Ocean cluster.
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+
+ # Returns
+ (Object): Ocean API response
+ """
+ return self.send_delete(
+ url=self.__base_ocean_cluster_url + "/" + ocean_id,
+ entity_name="ocean"
+ )
+
+ def get_ocean_cluster(self, ocean_id: str):
+ """
+ Get the configuration for a specified Ocean cluster.
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+
+ # Returns
+ (Object): Ocean API response
+ """
+ response = self.send_get(
+ url=self.__base_ocean_cluster_url + "/" + ocean_id,
+ entity_name="ocean"
+ )
+
+ formatted_response = self.convert_json(
+ response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def update_ocean_cluster(self, ocean_id: str, ocean: gcp_ocean.Ocean):
+ """
+ Update an existing Ocean Cluster
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+ ocean (Ocean): Ocean object
+
+ # Returns
+ (Object): Ocean API response
+ """
+ ocean = gcp_ocean.OceanRequest(ocean)
+
+ excluded_missing_dict = self.exclude_missing(
+ json.loads(ocean.toJSON()))
+
+ formatted_missing_dict = self.convert_json(
+ excluded_missing_dict, self.underscore_to_camel)
+
+ body_json = json.dumps(formatted_missing_dict)
+
+ response = self.send_put(
+ body=body_json,
+ url=self.__base_ocean_cluster_url + "/" + ocean_id,
+ entity_name='ocean')
+
+ formatted_response = self.convert_json(
+ response,
+ self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def reimport_ocean_cluster(self, ocean_id: str):
+ """
+ Reimport the cluster's configuration from GKE.
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+
+ # Returns
+ (Object): Reimport cluster response
+ """
+
+ response = self.send_put(
+ url=self.__base_ocean_cluster_url + "/" + ocean_id + '/reImport',
+ entity_name='ocean')
+
+ formatted_response = self.convert_json(
+ response,
+ self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def get_elastilog(self, ocean_id: str, from_date: str, to_date: str, severity: str = None, resource_id: str = None,
+ limit: int = None):
+ """
+ Get the cluster's Elastilog for a given time interval.
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+ from_date (String): beginning date value
+ to_date (String): end date value
+ severity(String) (Optional): Log level severity
+ resource_id(String) (Optional): specific resource identifier
+ limit(int) (Optional): Maximum number of lines to extract in a response
+
+ # Returns
+ (Object): Ocean Get Log API response
+ """
+ geturl = self.__base_ocean_cluster_url + "/" + ocean_id + "/log"
+ query_params = dict(toDate=to_date, fromDate=from_date, severity=severity,
+ resourceId=resource_id, limit=limit)
+
+ result = self.send_get(
+ url=geturl, entity_name='ocean_gcp_log', query_params=query_params)
+
+ formatted_response = self.convert_json(
+ result, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"]
+
+ def get_rightsizing_recommendations(self, ocean_id: str, filter: gcp_ocean.RightSizingRecommendationFilter = None):
+ """
+ Get right-sizing recommendations for an Ocean cluster and filter them according to namespace or label.
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+ filter (RightSizingRecommendationFilter): Optional - may be null.
+
+ # Returns
+ (Object): Ocean API response
+ """
+ recommendation_request = gcp_ocean.RightSizingRecommendationRequest(
+ filter)
+
+ excluded_missing_dict = self.exclude_missing(
+ json.loads(recommendation_request.toJSON()))
+
+ formatted_missing_dict = self.convert_json(
+ excluded_missing_dict, self.underscore_to_camel)
+
+ body_json = json.dumps(formatted_missing_dict)
+
+ group_response = self.send_post(
+ body=body_json,
+ url=self.__base_ocean_cluster_url +
+ "/" + ocean_id + "/rightSizing/suggestion",
+ entity_name='ocean')
+
+ formatted_response = self.convert_json(
+ group_response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"]
+
+ def get_aggregated_cluster_costs(self, ocean_id: str, aggregated_cluster_costs: gcp_ocean.AggregatedClusterCosts):
+ """
+ Get aggregated cluster costs
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+ aggregated_cluster_costs (AggregatedClusterCosts): Aggregated Cluster Costs request
+
+ # Returns
+ (Object): Aggregated Cluster Costs API response
+ """
+ aggregated_cluster_costs_request = gcp_ocean.AggregatedClusterCostRequest(
+ aggregated_cluster_costs)
+
+ excluded_missing_dict = self.exclude_missing(
+ json.loads(aggregated_cluster_costs_request.toJSON()))
+
+ formatted_missing_dict = self.convert_json_with_list_of_lists(
+ excluded_missing_dict, self.underscore_to_camel)
+
+ body_json = json.dumps(formatted_missing_dict)
+
+ aggregated_costs_response = self.send_post(
+ body=body_json,
+ url=self.__base_ocean_cluster_url + "/" + ocean_id + "/aggregatedCosts",
+ entity_name='ocean (aggregated cluster costs)')
+
+ formatted_response = self.convert_json(
+ aggregated_costs_response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def get_aggregated_summary_costs(self, ocean_id: str, aggregated_cluster_costs: gcp_ocean.AggregatedClusterCosts):
+ """
+ Get an aggregated summary of cluster costs.
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+ aggregated_cluster_costs (AggregatedClusterCosts): Aggregated Cluster Costs request
+
+ # Returns
+ (Object): Aggregated Cluster Costs API response
+ """
+ aggregated_cluster_costs_request = gcp_ocean.AggregatedClusterCostRequest(
+ aggregated_cluster_costs)
+
+ excluded_missing_dict = self.exclude_missing(
+ json.loads(aggregated_cluster_costs_request.toJSON()))
+
+ formatted_missing_dict = self.convert_json_with_list_of_lists(
+ excluded_missing_dict, self.underscore_to_camel)
+
+ body_json = json.dumps(formatted_missing_dict)
+
+ aggregated_summary_costs_response = self.send_post(
+ body=body_json,
+ url=self.__base_ocean_cluster_url + "/" +
+ ocean_id + "/aggregatedCosts/summary",
+ entity_name='ocean (aggregated summary costs)')
+
+ formatted_response = self.convert_json(
+ aggregated_summary_costs_response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def create_virtual_node_group(self, vng: gcp_ocean.VirtualNodeGroup):
+ """
+ Create a virtual node group.
+
+ # Arguments
+ vng (VirtualNodeGroup): VirtualNodeGroup Object
+
+ # Returns
+ (Object): Ocean Launch Spec response
+ """
+ ocean = gcp_ocean.VNGRequest(vng)
+
+ excluded_missing_dict = self.exclude_missing(
+ json.loads(ocean.toJSON()))
+
+ formatted_missing_dict = self.convert_json(
+ excluded_missing_dict, self.underscore_to_camel)
+
+ body_json = json.dumps(formatted_missing_dict)
+
+ response = self.send_post(
+ body=body_json,
+ url=self.__base_ocean_launchspec_url,
+ entity_name='ocean_gcp_vng')
+
+ formatted_response = self.convert_json(response,
+ self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def get_all_virtual_node_groups(self, ocean_id: str):
+ """
+ List the configurations for all virtual node groups in the specified Ocean cluster.
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+
+ # Returns
+ (Object): Ocean VNG API response
+ """
+
+ response = self.send_get(
+ url=self.__base_ocean_launchspec_url,
+ entity_name="ocean_gcp_vng",
+ query_params=dict(oceanId=ocean_id)
+ )
+
+ formatted_response = self.convert_json(
+ response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"]
+
+ def import_gke_nodepool_to_vng_configuration(self, node_pool_name: str, ocean_id: str):
+ """
+ Import GKE Nodepool configurations and generate valid Ocean Virtual Node Group (VNG) configuration
+ which can be used to create VNGs.
+
+ # Arguments
+ node_pool_name (String): Name of the GKE node pool to import
+ ocean_id (String): ID of the Ocean Cluster
+
+ # Returns
+ (Object): Ocean API response
+ """
+
+ response = self.send_post_with_params(
+ body=None,
+ url=self.__base_ocean_launchspec_url + "/import",
+ entity_name='ocean_gcp_vng',
+ user_query_params=dict(nodePoolName=node_pool_name, oceanId=ocean_id))
+
+ formatted_response = self.convert_json(response,
+ self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def delete_virtual_node_group(self, vng_id: str, delete_nodes: bool = None):
+ """
+ Delete a Virtual Node Group (launch specification) from an Ocean cluster.
+
+ # Arguments
+ vng_id (String): ID of the Ocean VNG
+ delete_nodes (Bool): When set to "true", all instances belonging to the deleted launch specification will be drained, detached, and terminated.
+
+ # Returns
+ (Object): Ocean Launch Specification Delete response
+ """
+ return self.send_delete_with_params(
+ url=self.__base_ocean_launchspec_url + "/" + vng_id,
+ entity_name="ocean_gcp_vng",
+ user_query_params=dict(deleteNodes=delete_nodes)
+ )
+
+ def update_virtual_node_group(self, vng_id: str, vng: gcp_ocean.VirtualNodeGroup):
+ """
+ Update an existing VNG inside an Ocean Cluster
+
+ # Arguments
+ vng_id (String): ID of the Ocean Virtual Node Group
+ vng (VirtualNodeGroup): VirtualNodeGroup object
+
+ # Returns
+ (Object): Ocean Launch Spec response
+ """
+ ocean = gcp_ocean.VNGRequest(vng)
+
+ excluded_missing_dict = self.exclude_missing(
+ json.loads(ocean.toJSON()))
+
+ formatted_missing_dict = self.convert_json(
+ excluded_missing_dict, self.underscore_to_camel)
+
+ body_json = json.dumps(formatted_missing_dict)
+
+ response = self.send_put(
+ body=body_json,
+ url=self.__base_ocean_launchspec_url + "/" + vng_id,
+ entity_name='ocean_gcp_vng')
+
+ formatted_response = self.convert_json(
+ response,
+ self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def get_virtual_node_group(self, ocean_launch_spec_id: str):
+ """
+ Get a Virtual Node Group of the cluster.
+
+ # Arguments
+ ocean_launch_spec_id (String): Ocean cluster launch specification identifier
+
+ # Returns
+ (Object): Ocean Launch Spec response
+ """
+ response = self.send_get(
+ url=self.__base_ocean_launchspec_url + "/" + ocean_launch_spec_id,
+ entity_name="ocean_gcp_vng"
+ )
+
+ formatted_response = self.convert_json(
+ response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"]
+
+ def initiate_roll(self, ocean_id: str, cluster_roll: gcp_ocean.Roll):
+ """
+ Initiate Cluster Rolls
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+ cluster_roll (Roll): Cluster Roll / Roll with Instance Ids/ Launch specification Ids
+
+ # Returns
+ (Object): Cluster Roll API response
+ """
+ roll_request = gcp_ocean.ClusterRollInitiateRequest(cluster_roll)
+
+ excluded_missing_dict = self.exclude_missing(
+ json.loads(roll_request.toJSON()))
+
+ formatted_missing_dict = self.convert_json_with_list_of_lists(
+ excluded_missing_dict, self.underscore_to_camel)
+
+ body_json = json.dumps(formatted_missing_dict)
+
+ rolls_response = self.send_post(
+ body=body_json,
+ url=self.__base_ocean_cluster_url + "/" + ocean_id + "/roll",
+ entity_name='ocean (Cluster Roll)')
+
+ formatted_response = self.convert_json(
+ rolls_response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def list_rolls(self, ocean_id: str):
+ """
+ Get status for all rolls of an Ocean cluster.
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+
+ # Returns
+ (Object): List of Cluster Roll API response
+ """
+ response = self.send_get(
+ url=self.__base_ocean_cluster_url + "/" + ocean_id + "/roll",
+ entity_name="ocean (Cluster Roll)"
+ )
+
+ formatted_response = self.convert_json(
+ response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"]
+
+ def update_roll(self, ocean_id: str, roll_id: str, status: str):
+ """
+ Update a roll of an Ocean cluster.
+ Performing the request will stop the next batch in a roll.
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+ roll_id (String): Ocean cluster roll identifier
+ status (String): Roll status to set, e.g. "STOPPED" to stop the roll
+
+ # Returns
+ (Object): Cluster Roll API response
+ """
+ update_roll_request = gcp_ocean.ClusterRollUpdateRequest(status)
+
+ excluded_missing_dict = self.exclude_missing(
+ json.loads(update_roll_request.toJSON()))
+
+ formatted_missing_dict = self.convert_json(
+ excluded_missing_dict, self.underscore_to_camel)
+
+ body_json = json.dumps(formatted_missing_dict)
+
+ response = self.send_put(
+ body=body_json,
+ url=self.__base_ocean_cluster_url + "/" + ocean_id + "/roll/" + roll_id,
+ entity_name='ocean (Cluster Roll)')
+
+ formatted_response = self.convert_json(
+ response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def get_roll(self, ocean_id: str, roll_id: str):
+ """
+ Get status for a roll of an Ocean cluster.
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+ roll_id (String): Ocean cluster roll identifier
+
+ # Returns
+ (Object): Cluster Roll API response
+ """
+ response = self.send_get(
+ url=self.__base_ocean_cluster_url + "/" + ocean_id + "/roll/" + roll_id,
+ entity_name="ocean (Cluster Roll)"
+ )
+
+ formatted_response = self.convert_json(
+ response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def get_cluster_nodes(self, ocean_id: str, instance_name: str = None, launch_spec_id: str = None):
+ """
+ Get nodes data of an Ocean cluster.
+
+ # Arguments
+ ocean_id (String): ID of the Ocean Cluster
+ instance_name (String): Get a specific node by instance name
+ launch_spec_id (String): Ocean cluster launch specification identifier.
+
+ # Returns
+ (Object): Ocean Kubernetes Nodes Data response
+ """
+ query_params = dict(instanceName=instance_name,
+ launchSpecId=launch_spec_id)
+
+ response = self.send_get(
+ url=self.__base_ocean_cluster_url + "/" + ocean_id + "/nodes",
+ entity_name="ocean (Cluster Nodes)",
+ query_params=query_params
+ )
+
+ formatted_response = self.convert_json(
+ response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"]
+
+ def update_elastigroup_to_ocean(self, group_id: str):
+ """
+ Upgrade an Elastigroup with Kubernetes integration to Ocean for Kubernetes cluster.
+
+ # Arguments
+ group_id (str): Elastigroup identifier
+
+ # Returns
+ (Object): Ocean API response
+ """
+
+ query_params = dict(groupId=group_id)
+
+ response = self.send_post_with_params(
+ body=None,
+ url=self.__base_ocean_cluster_url + "/import",
+ entity_name='ocean_gcp_update_eg_to_ocean',
+ user_query_params=query_params)
+
+ formatted_response = self.convert_json(response,
+ self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
+
+ def import_gke_cluster_to_ocean(self, cluster_name: str, location: str,
+ import_gke_to_ocean: gcp_ocean.ImportGkeClusterToOcean,
+ include_launchSpecs: bool = None, node_pool_name: str = None):
+ """
+ Create an Ocean configuration according to a GKE Cluster configuration.
+
+ # Arguments
+ cluster_name (String): Name of the GKE Cluster.
+ include_launchSpecs (Bool): When set to "true", GKE cluster node pools will be imported to Ocean custom VNG ("customLaunchSpec") configurations.
+ location (String): Location GKE Cluster Master.
+ node_pool_name (String): Name of the Node Pool to use as a default for the Cluster configuration.
+ import_gke_to_ocean (ImportGkeClusterToOcean): ImportGkeClusterToOcean Object
+
+ # Returns
+ (Object): Ocean GKE Cluster Import Response
+ """
+ import_gke_to_ocean_body = gcp_ocean.ImportGkeClusterToOceanRequest(
+ import_gke_to_ocean)
+
+ excluded_missing_dict = self.exclude_missing(
+ json.loads(import_gke_to_ocean_body.toJSON()))
+
+ formatted_missing_dict = self.convert_json(
+ excluded_missing_dict, self.underscore_to_camel)
+
+ body_json = json.dumps(formatted_missing_dict)
+
+ geturl = self.__base_ocean_cluster_url + "/gke/import"
+
+ query_params = dict(
+ clusterName=cluster_name, includeLaunchSpecs=include_launchSpecs, location=location, nodePoolName=node_pool_name)
+
+ result = self.send_post_with_params(
+ body=body_json,
+ url=geturl,
+ entity_name='import_gke_cluster_to_ocean',
+ user_query_params=query_params)
+
+ formatted_response = self.convert_json(
+ result, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"]
+
+ def launch_nodes_in_vng(self, ocean_launch_spec_id: str, amount: int):
+ """
+ Launch nodes in Virtual Node Group.
+
+ # Arguments
+ ocean_launch_spec_id (String): Ocean cluster launch specification identifier.
+ amount (int): The number of nodes to launch.
+
+ # Returns
+ (Object): Ocean Virtual Node Group Launch API response
+ """
+ launch_node_request = gcp_ocean.LaunchNodesRequest(amount)
+
+ excluded_missing_dict = self.exclude_missing(
+ json.loads(launch_node_request.toJSON()))
+
+ formatted_missing_dict = self.convert_json(
+ excluded_missing_dict, self.underscore_to_camel)
+
+ body_json = json.dumps(formatted_missing_dict)
+
+ response = self.send_put(
+ body=body_json,
+ url=self.__base_ocean_launchspec_url + "/" +
+ ocean_launch_spec_id + "/launchNodes",
+ entity_name='ocean (Cluster Roll)')
+
+ formatted_response = self.convert_json(
+ response, self.camel_to_underscore)
+
+ return formatted_response["response"]["items"][0]
diff --git a/spotinst_sdk2/models/ocean/gcp/__init__.py b/spotinst_sdk2/models/ocean/gcp/__init__.py
new file mode 100644
index 00000000..3ff084a8
--- /dev/null
+++ b/spotinst_sdk2/models/ocean/gcp/__init__.py
@@ -0,0 +1,1005 @@
+import json
+from enum import Enum
+from typing import List
+
+none = "d3043820717d74d9a17694c176d39733"
+
+
+# region AutoScaler
+class Down:
+ """
+ # Arguments
+ evaluation_periods: int
+ max_scale_down_percentage: int
+ """
+
+ def __init__(
+ self,
+ evaluation_periods: int = none,
+ max_scale_down_percentage: int = none
+ ):
+ self.evaluation_periods = evaluation_periods
+ self.max_scale_down_percentage = max_scale_down_percentage
+
+
+class Headroom:
+ """
+ # Arguments
+ cpu_per_unit: int
+ gpu_per_unit: int
+ memory_per_unit: int
+ num_of_units: int
+ """
+
+ def __init__(
+ self,
+ cpu_per_unit: int = none,
+ gpu_per_unit: int = none,
+ memory_per_unit: int = none,
+ num_of_units: int = none
+ ):
+ self.cpu_per_unit = cpu_per_unit
+ self.gpu_per_unit = gpu_per_unit
+ self.memory_per_unit = memory_per_unit
+ self.num_of_units = num_of_units
+
+
+class ResourceLimits:
+ """
+ # Arguments
+ max_memory_gib: int
+ max_v_cpu: int
+ """
+
+ def __init__(
+ self,
+ max_memory_gib: int = none,
+ max_v_cpu: int = none
+ ):
+ self.max_memory_gib = max_memory_gib
+ self.max_v_cpu = max_v_cpu
+
+
+class AutoScaler:
+ """
+ # Arguments
+ auto_headroom_percentage: int
+ cooldown: int
+ down: Down
+ enable_automatic_and_manual_headroom: bool
+ headroom: Headroom
+ is_auto_config: bool
+ is_enabled: bool
+ resource_limits: ResourceLimits
+ """
+
+ def __init__(
+ self,
+ auto_headroom_percentage: int = none,
+ cooldown: int = none,
+ down: Down = none,
+ enable_automatic_and_manual_headroom: bool = none,
+ headroom: Headroom = none,
+ is_auto_config: bool = none,
+ is_enabled: bool = none,
+ resource_limits: ResourceLimits = none
+ ):
+ self.auto_headroom_percentage = auto_headroom_percentage
+ self.cooldown = cooldown
+ self.down = down
+ self.enable_automatic_and_manual_headroom = enable_automatic_and_manual_headroom
+ self.headroom = headroom
+ self.is_auto_config = is_auto_config
+ self.is_enabled = is_enabled
+ self.resource_limits = resource_limits
+# endregion
+
+
+# region Capacity
+class Capacity:
+ """
+ # Arguments
+ maximum: int
+ minimum: int
+ target: int
+ """
+
+ def __init__(
+ self,
+ maximum: int = none,
+ minimum: int = none,
+ target: int = none
+ ):
+ self.maximum = maximum
+ self.minimum = minimum
+ self.target = target
+# endregion
+
+
+# region Compute
+class LocationType(Enum):
+ regional = "regional"
+ _global = "global"
+
+
+class NamedPorts:
+ """
+ # Arguments
+ name: str
+ ports: List[int]
+ """
+
+ def __init__(
+ self,
+ name: str = none,
+ ports: List[int] = none):
+ self.name = name
+ self.ports = ports
+
+
+class Scheme(Enum):
+ external = "EXTERNAL"
+ internal = "INTERNAL"
+
+
+class BackendServices:
+ """
+ # Arguments
+ backend_service_name: str
+ location_type: LocationType
+ named_ports: NamedPorts
+ scheme: Scheme
+ """
+
+ def __init__(
+ self,
+ backend_service_name: str = none,
+ location_type: LocationType = none,
+ named_ports: NamedPorts = none,
+ scheme: Scheme = none
+ ):
+ self.backend_service_name = backend_service_name
+ self.location_type = location_type
+ self.named_ports = named_ports
+ self.scheme = scheme
+
+
+class InstanceTypes:
+ """
+ # Arguments
+ blacklist: List[str]
+ whitelist: List[str]
+ """
+
+ def __init__(
+ self,
+ blacklist: List[str] = none,
+ whitelist: List[str] = none):
+ self.blacklist = blacklist
+ self.whitelist = whitelist
+
+
+class Labels:
+ """
+ # Arguments
+ key: str
+ value: str
+ """
+
+ def __init__(
+ self,
+ key: str = none,
+ value: str = none):
+ self.key = key
+ self.value = value
+
+
+class Metadata:
+ """
+ # Arguments
+ key: str
+ value: str
+ """
+
+ def __init__(
+ self,
+ key: str = none,
+ value: str = none):
+ self.key = key
+ self.value = value
+
+
+class RootVolumeType(Enum):
+ pd_standard = "pd-standard"
+ pd_ssd = "pd-ssd"
+
+
+class ShieldedInstanceConfig:
+ """
+ # Arguments
+ enable_integrity_monitoring: bool
+ enable_secure_boot: bool
+ """
+
+ def __init__(
+ self,
+ enable_integrity_monitoring: bool = none,
+ enable_secure_boot: bool = none):
+ self.enable_integrity_monitoring = enable_integrity_monitoring
+ self.enable_secure_boot = enable_secure_boot
+
+
+class LaunchSpecification:
+ """
+ # Arguments
+ ip_forwarding: bool
+ labels: List[Labels]
+ metadata: List[Metadata]
+ min_cpu_platform: str
+ root_volume_size_in_gb: int
+ root_volume_type: RootVolumeType
+ service_account: str
+ shielded_instance_config: ShieldedInstanceConfig
+ source_image: str
+ tags: List[str]
+ use_as_template_only: bool
+ """
+
+ def __init__(
+ self,
+ ip_forwarding: bool = none,
+ labels: List[Labels] = none,
+ metadata: List[Metadata] = none,
+ min_cpu_platform: str = none,
+ root_volume_size_in_gb: int = none,
+ root_volume_type: RootVolumeType = none,
+ service_account: str = none,
+ shielded_instance_config: ShieldedInstanceConfig = none,
+ source_image: str = none,
+ tags: List[str] = none,
+ use_as_template_only: bool = none
+ ):
+ self.ip_forwarding = ip_forwarding
+ self.labels = labels
+ self.metadata = metadata
+ self.min_cpu_platform = min_cpu_platform
+ self.root_volume_size_in_gb = root_volume_size_in_gb
+ self.root_volume_type = root_volume_type
+ self.service_account = service_account
+ self.shielded_instance_config = shielded_instance_config
+ self.source_image = source_image
+ self.tags = tags
+ self.use_as_template_only = use_as_template_only
+
+
+class AccessConfigs:
+ """
+ # Arguments
+ name: str
+ type: str
+ """
+
+ def __init__(
+ self,
+ name: str = none,
+ type: str = none
+ ):
+ self.name = name
+ self.type = type
+
+
+class AliasIpRanges:
+ """
+ # Arguments
+ ip_cidr_range: str
+ subnetwork_range_name: str
+ """
+
+ def __init__(
+ self,
+ ip_cidr_range: str = none,
+ subnetwork_range_name: str = none
+ ):
+ self.ip_cidr_range = ip_cidr_range
+ self.subnetwork_range_name = subnetwork_range_name
+
+
+class NetworkInterfaces:
+ """
+ # Arguments
+ access_configs: List[AccessConfigs]
+ alias_ip_ranges: List[AliasIpRanges]
+ network: str
+ project_id: str
+ """
+
+ def __init__(
+ self,
+ access_configs: List[AccessConfigs] = none,
+ alias_ip_ranges: List[AliasIpRanges] = none,
+ network: str = none,
+ project_id: str = none,
+ ):
+ self.access_configs = access_configs
+ self.alias_ip_ranges = alias_ip_ranges
+ self.network = network
+ self.project_id = project_id
+
+
+class Compute:
+ """
+ # Arguments
+ availability_zones: List[str]
+ backend_services: BackendServices
+ instance_types: InstanceTypes
+ launch_specification: LaunchSpecification
+ network_interfaces: List[NetworkInterfaces]
+ subnet_name: str
+ """
+
+ def __init__(
+ self,
+ availability_zones: List[str] = none,
+ backend_services: BackendServices = none,
+ instance_types: InstanceTypes = none,
+ launch_specification: LaunchSpecification = none,
+ network_interfaces: List[NetworkInterfaces] = none,
+ subnet_name: str = none
+ ):
+ self.availability_zones = availability_zones
+ self.backend_services = backend_services
+ self.instance_types = instance_types
+ self.launch_specification = launch_specification
+ self.network_interfaces = network_interfaces
+ self.subnet_name = subnet_name
+# endregion
+
+
+# region GKE
+class GKE:
+ """
+ # Arguments
+ cluster_name: str
+ master_location: str
+ """
+
+ def __init__(
+ self,
+ cluster_name: str = none,
+ master_location: str = none
+ ):
+ self.cluster_name = cluster_name
+ self.master_location = master_location
+# endregion
+
+
+# region Scheduling
+class ShutdownHours:
+ """
+ # Arguments
+ is_enabled: bool
+ time_windows: List[str]
+ """
+
+ def __init__(
+ self,
+ is_enabled: bool = none,
+ time_windows: List[str] = none
+ ):
+ self.is_enabled = is_enabled
+ self.time_windows = time_windows
+
+
+class ClusterRoll:
+ """
+ # Arguments
+ batch_min_healthy_percentage: int
+ batch_size_percentage: int
+ comment: str
+ respect_pdb: bool
+ """
+
+ def __init__(
+ self,
+ batch_min_healthy_percentage: int = none,
+ batch_size_percentage: int = none,
+ comment: str = none,
+ respect_pdb: bool = none
+ ):
+ self.batch_min_healthy_percentage = batch_min_healthy_percentage
+ self.batch_size_percentage = batch_size_percentage
+ self.comment = comment
+ self.respect_pdb = respect_pdb
+
+
+class Parameters:
+ """
+ # Arguments
+ cluster_roll: ClusterRoll
+ """
+
+ def __init__(
+ self,
+ cluster_roll: ClusterRoll = none):
+ self.cluster_roll = cluster_roll
+
+
+class Tasks:
+ """
+ # Arguments
+ cron_expression: str
+ is_enabled: bool
+ parameters: Parameters
+ task_type: str
+ """
+
+ def __init__(
+ self,
+ cron_expression: str = none,
+ is_enabled: bool = none,
+ parameters: Parameters = none,
+ task_type: str = none
+ ):
+ self.cron_expression = cron_expression
+ self.is_enabled = is_enabled
+ self.parameters = parameters
+ self.task_type = task_type
+
+
+class Scheduling:
+ """
+ # Arguments
+ shutdown_hours: ShutdownHours
+ tasks: List[Tasks]
+ """
+
+ def __init__(
+ self,
+ shutdown_hours: ShutdownHours = none,
+ tasks: List[Tasks] = none):
+ self.shutdown_hours = shutdown_hours
+ self.tasks = tasks
+# endregion
+
+
+# region Security
+class ContainerImage:
+ """
+ # Arguments
+ approved_images: List[str]
+ """
+
+ def __init__(
+ self,
+ approved_images: List[str] = none
+ ):
+ self.approved_images = approved_images
+
+
+class Security:
+ """
+ # Arguments
+ container_image: ContainerImage
+ """
+
+ def __init__(
+ self,
+ container_image: ContainerImage = none):
+ self.container_image = container_image
+# endregion
+
+
+# region Strategy
+class ProvisioningModel(Enum):
+ spot = "SPOT"
+ preemptible = "PREEMPTIBLE"
+
+
+class Strategy:
+ """
+ # Arguments
+ draining_timeout: int
+ preemptible_percentage: int
+ provisioning_model: ProvisioningModel
+ """
+
+ def __init__(
+ self,
+ draining_timeout: int = none,
+ preemptible_percentage: int = none,
+ provisioning_model: ProvisioningModel = none
+ ):
+ self.draining_timeout = draining_timeout
+ self.preemptible_percentage = preemptible_percentage
+ self.provisioning_model = provisioning_model
+# endregion
+
+
+# region Ocean
+class Ocean:
+ """
+ # Arguments
+ auto_scaler: AutoScaler
+ capacity: Capacity
+ compute: Compute
+ controller_cluster_id: str
+ gke: GKE
+ name: str
+ scheduling: Scheduling
+ security: Security
+ strategy: Strategy
+ """
+
+ def __init__(
+ self,
+ auto_scaler: AutoScaler = none,
+ capacity: Capacity = none,
+ compute: Compute = none,
+ controller_cluster_id: str = none,
+ gke: GKE = none,
+ name: str = none,
+ scheduling: Scheduling = none,
+ security: Security = none,
+ strategy: Strategy = none
+ ):
+ self.auto_scaler = auto_scaler
+ self.capacity = capacity
+ self.compute = compute
+ self.controller_cluster_id = controller_cluster_id
+ self.gke = gke
+ self.name = name
+ self.scheduling = scheduling
+ self.security = security
+ self.strategy = strategy
+# endregion
+
+
+# region OceanRequest
+class OceanRequest:
+ def __init__(self, ocean: Ocean):
+ self.cluster = ocean
+
+ def toJSON(self):
+ return json.dumps(self, default=lambda o: o.__dict__,
+ sort_keys=True, indent=4)
+# endregion
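+
+
+# Example (illustrative sketch): a minimal Ocean GKE cluster built from the
+# models above; the cluster names and location are placeholder values.
+#
+#   ocean = Ocean(
+#       name="ocean-gke-example",
+#       controller_cluster_id="ocean-gke-example",
+#       gke=GKE(cluster_name="example-gke-cluster",
+#               master_location="us-central1-a"),
+#       strategy=Strategy(preemptible_percentage=80, draining_timeout=120))
+#   request = OceanRequest(ocean)
+#   payload = request.toJSON()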
+
+
+class Type(Enum):
+ label = "label"
+ annotation = "annotation"
+
+
+class Operator(Enum):
+ equals = "equals"
+ not_equals = "notEquals"
+ exists = "exists"
+ does_not_exist = "doesNotExist"
+
+
+class Attribute:
+ """
+ # Arguments
+ type: Type
+ key: str
+ operator: Operator
+ value: str
+ """
+
+ def __init__(
+ self,
+ type: Type = none,
+ key: str = none,
+ operator: Operator = none,
+ value: str = none):
+ self.type = type
+ self.key = key
+ self.operator = operator
+ self.value = value
+
+
+class RightSizingRecommendationFilter:
+ """
+ # Arguments
+ namespaces: List[str]
+ attribute: Attribute
+ """
+
+ def __init__(
+ self,
+ namespaces: List[str] = none,
+ attribute: Attribute = none):
+ self.namespaces = namespaces
+ self.attribute = attribute
+
+
+class RightSizingRecommendationRequest:
+ def __init__(self, filter: RightSizingRecommendationFilter = none):
+ self.filter = filter
+
+ def toJSON(self):
+ return json.dumps(self, default=lambda o: o.__dict__,
+ sort_keys=True, indent=4)
+
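+
+# Example (illustrative sketch): restricting right-sizing recommendations to a
+# namespace and a label attribute; the key/value strings are placeholders.
+#
+#   recommendation_filter = RightSizingRecommendationFilter(
+#       namespaces=["kube-system"],
+#       attribute=Attribute(type=Type.label,
+#                           key="app",
+#                           operator=Operator.equals,
+#                           value="nginx"))
+#   request = RightSizingRecommendationRequest(filter=recommendation_filter)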
+
+class AllMatch:
+ """
+ # Arguments
+ all_match: List[Attribute]
+ """
+
+ def __init__(
+ self,
+ all_match: List[Attribute] = none):
+ self.all_match = all_match
+
+
+class Conditions:
+ """
+ # Arguments
+ any_match: List[AllMatch]
+ """
+
+ def __init__(
+ self,
+ any_match: List[AllMatch] = none):
+ self.any_match = any_match
+
+
+class Scope(Enum):
+ namespace = "namespace"
+ resource = "resource"
+
+
+class Filter:
+ """
+ # Arguments
+ conditions: Conditions
+ scope: Scope
+ """
+
+ def __init__(
+ self,
+ conditions: Conditions = none,
+ scope: Scope = none):
+ self.conditions = conditions
+ self.scope = scope
+
+
+class GroupBy(Enum):
+ namespace = "namespace"
+ namespace_label = "namespace.label.${labelKey}"
+ resource_label = "resource.label.${labelKey}"
+ namespace_annotation = "namespace.annotation.${annotationKey}"
+ resource_annotation = "resource.annotation.${annotationKey}"
+
+
+class AggregatedClusterCosts:
+ """
+ # Arguments
+ end_time: str
+ aggregated_filter: Filter
+ group_by: GroupBy
+ start_time: str
+ """
+
+ def __init__(
+ self,
+ end_time: str = none,
+ aggregated_filter: Filter = none,
+ group_by: GroupBy = GroupBy.namespace.value,
+ start_time: str = none):
+ self.end_time = end_time
+ self.aggregated_filter = aggregated_filter
+ self.group_by = group_by
+ self.start_time = start_time
+
+
+class AggregatedClusterCostRequest:
+ def __init__(self, aggregated_cluster_costs: AggregatedClusterCosts = none):
+ self.end_time = aggregated_cluster_costs.end_time
+ self.start_time = aggregated_cluster_costs.start_time
+ self.group_by = aggregated_cluster_costs.group_by
+ self.filter = aggregated_cluster_costs.aggregated_filter
+
+ def toJSON(self):
+ return json.dumps(self, default=lambda o: o.__dict__,
+ sort_keys=True, indent=4)
+
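+
+# Example (illustrative sketch): an aggregated cluster cost query grouped by
+# namespace; the time values are placeholders (see the Spot API docs for the
+# expected time format).
+#
+#   costs = AggregatedClusterCosts(
+#       start_time="1704067200000",
+#       end_time="1706745600000",
+#       group_by=GroupBy.namespace.value)
+#   request = AggregatedClusterCostRequest(aggregated_cluster_costs=costs)
+#   payload = request.toJSON()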
+
+# region VirtualNodeGroup
+class AutoScale:
+ """
+ # Arguments
+ auto_headroom_percentage: int
+ headrooms: List[Headroom]
+ down: Down
+ """
+
+ def __init__(
+ self,
+ auto_headroom_percentage: int = none,
+ headrooms: List[Headroom] = none,
+ down: Down = none
+ ):
+ self.auto_headroom_percentage = auto_headroom_percentage
+ self.headrooms = headrooms
+ self.down = down
+
+
+class VNGResourceLimits:
+ """
+ # Arguments
+ max_instance_count: int
+ min_instance_count: int
+ """
+
+ def __init__(
+ self,
+ max_instance_count: int = none,
+ min_instance_count: int = none
+ ):
+ self.max_instance_count = max_instance_count
+ self.min_instance_count = min_instance_count
+
+
+class Config:
+ """
+ # Arguments
+ headrooms: List[Headroom]
+ """
+
+ def __init__(self,
+ headrooms: List[Headroom] = none):
+ self.headrooms = headrooms
+
+
+class VNGTasks:
+ """
+ # Arguments
+ config: Config
+ cron_expression: str
+ is_enabled: bool
+ task_type: str
+ """
+
+ def __init__(
+ self,
+ config: Config = none,
+ cron_expression: str = none,
+ is_enabled: bool = none,
+ task_type: str = none
+ ):
+ self.config = config
+ self.cron_expression = cron_expression
+ self.is_enabled = is_enabled
+ self.task_type = task_type
+
+
+class VNGScheduling:
+ """
+ # Arguments
+ tasks: List[VNGTasks]
+ """
+
+ def __init__(
+ self,
+ tasks: List[VNGTasks] = none):
+ self.tasks = tasks
+
+
+class Storage:
+ """
+ # Arguments
+ local_ssd_count: int
+ """
+
+ def __init__(
+ self,
+ local_ssd_count: int = none):
+ self.local_ssd_count = local_ssd_count
+
+
+class VNGStrategy:
+ """
+ # Arguments
+ preemptible_percentage: int
+ """
+
+ def __init__(
+ self,
+ preemptible_percentage: int = none
+ ):
+ self.preemptible_percentage = preemptible_percentage
+
+
+class Taints:
+ """
+ # Arguments
+ effect: str
+ key: str
+ value: str
+ """
+
+ def __init__(
+ self,
+ effect: str = none,
+ key: str = none,
+ value: str = none
+ ):
+ self.effect = effect
+ self.key = key
+ self.value = value
+
+
+class VirtualNodeGroup:
+ """
+ # Arguments
+ auto_scale: AutoScale
+ availability_zones: List[str]
+ instance_types: List[str]
+ labels: List[Labels]
+ metadata: List[Metadata]
+ name: str
+ network_interfaces: List[NetworkInterfaces]
+ ocean_id: str
+ resource_limits: VNGResourceLimits
+ restrict_scale_down: bool
+ root_volume_size_in_gb: int
+ root_volume_type: str
+ scheduling: VNGScheduling
+ service_account: str
+ shielded_instance_config: ShieldedInstanceConfig
+ source_image: str
+ storage: Storage
+ strategy: VNGStrategy
+ tags: List[str]
+ taints: List[Taints]
+ """
+
+ def __init__(
+ self,
+ auto_scale: AutoScale = none,
+ availability_zones: List[str] = none,
+ instance_types: List[str] = none,
+ labels: List[Labels] = none,
+ metadata: List[Metadata] = none,
+ name: str = none,
+ network_interfaces: List[NetworkInterfaces] = none,
+ ocean_id: str = none,
+ resource_limits: VNGResourceLimits = none,
+ restrict_scale_down: bool = none,
+ root_volume_size_in_gb: int = none,
+ root_volume_type: str = none,
+ scheduling: VNGScheduling = none,
+ service_account: str = none,
+ shielded_instance_config: ShieldedInstanceConfig = none,
+ source_image: str = none,
+ storage: Storage = none,
+ strategy: VNGStrategy = none,
+ tags: List[str] = none,
+ taints: List[Taints] = none
+ ):
+ self.auto_scale = auto_scale
+ self.availability_zones = availability_zones
+ self.instance_types = instance_types
+ self.labels = labels
+ self.metadata = metadata
+ self.name = name
+ self.network_interfaces = network_interfaces
+ self.ocean_id = ocean_id
+ self.resource_limits = resource_limits
+ self.restrict_scale_down = restrict_scale_down
+ self.root_volume_size_in_gb = root_volume_size_in_gb
+ self.root_volume_type = root_volume_type
+ self.scheduling = scheduling
+ self.service_account = service_account
+ self.shielded_instance_config = shielded_instance_config
+ self.source_image = source_image
+ self.storage = storage
+ self.strategy = strategy
+ self.tags = tags
+ self.taints = taints
+# endregion
+
+
+class VNGRequest:
+ def __init__(self, vng: VirtualNodeGroup):
+ self.launch_spec = vng
+
+ def toJSON(self):
+ return json.dumps(self, default=lambda o: o.__dict__,
+ sort_keys=True, indent=4)
+
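+
+# Example (illustrative sketch): a minimal virtual node group attached to an
+# existing Ocean cluster; the ocean_id and instance types are placeholders.
+#
+#   vng = VirtualNodeGroup(
+#       name="spot-vng",
+#       ocean_id="o-12345abc",
+#       instance_types=["n1-standard-2", "n1-standard-4"],
+#       labels=[Labels(key="env", value="dev")],
+#       taints=[Taints(effect="NoSchedule", key="dedicated", value="batch")],
+#       strategy=VNGStrategy(preemptible_percentage=70))
+#   request = VNGRequest(vng)
+#   payload = request.toJSON()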
+
+class Roll:
+ """
+ # Arguments
+ batch_min_healthy_percentage: int
+ batch_size_percentage: int
+ comment: str
+ instance_ids: List[str]
+ launch_spec_ids: List[str]
+ respect_pdb: bool
+ """
+
+ def __init__(
+ self,
+ batch_min_healthy_percentage: int = none,
+ batch_size_percentage: int = none,
+ comment: str = none,
+ launch_spec_ids: List[str] = none,
+ instance_ids: List[str] = none,
+ respect_pdb: bool = none):
+ self.batch_min_healthy_percentage = batch_min_healthy_percentage
+ self.batch_size_percentage = batch_size_percentage
+ self.comment = comment
+ self.instance_ids = instance_ids
+ self.launch_spec_ids = launch_spec_ids
+ self.respect_pdb = respect_pdb
+
+
+class ClusterRollInitiateRequest:
+ def __init__(self, roll: Roll = none):
+ self.roll = roll
+
+ def toJSON(self):
+ return json.dumps(self, default=lambda o: o.__dict__,
+ sort_keys=True, indent=4)
+
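+
+# Example (illustrative sketch): initiating a cluster roll in batches of 20%
+# while keeping at least 50% of each batch healthy; values are placeholders.
+#
+#   roll = Roll(batch_size_percentage=20,
+#               batch_min_healthy_percentage=50,
+#               comment="roll after image update",
+#               respect_pdb=True)
+#   request = ClusterRollInitiateRequest(roll=roll)
+#   payload = request.toJSON()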
+
+class ClusterRollUpdateRequest:
+ def __init__(self, status: str = none):
+ self.roll = dict(status=status)
+
+ def toJSON(self):
+ return json.dumps(self, default=lambda o: o.__dict__,
+ sort_keys=True, indent=4)
+
+
+class ImportGkeClusterToOcean:
+ """
+ # Arguments
+ auto_scaler: AutoScaler
+ availability_zones: List[str]
+ backend_services: List[BackendServices]
+ capacity: Capacity
+ controller_cluster_id: str
+ instance_types: InstanceTypes
+ name: str
+ """
+
+ def __init__(
+ self,
+ auto_scaler: AutoScaler = none,
+ availability_zones: List[str] = none,
+ backend_services: List[BackendServices] = none,
+ capacity: Capacity = none,
+ controller_cluster_id: str = none,
+ instance_types: InstanceTypes = none,
+ name: str = none
+ ):
+ self.auto_scaler = auto_scaler
+ self.availability_zones = availability_zones
+ self.backend_services = backend_services
+ self.capacity = capacity
+ self.controller_cluster_id = controller_cluster_id
+ self.instance_types = instance_types
+ self.name = name
+
+
+class ImportGkeClusterToOceanRequest:
+ def __init__(self, cluster: ImportGkeClusterToOcean = none):
+ self.cluster = cluster
+
+ def toJSON(self):
+ return json.dumps(self, default=lambda o: o.__dict__,
+ sort_keys=True, indent=4)
+
+
+class LaunchNodesRequest:
+ def __init__(self, amount: int = none):
+ self.launch_request = dict(amount=amount)
+
+ def toJSON(self):
+ return json.dumps(self, default=lambda o: o.__dict__,
+ sort_keys=True, indent=4)
diff --git a/spotinst_sdk2/version.py b/spotinst_sdk2/version.py
index 4eb28e38..7f5601d9 100644
--- a/spotinst_sdk2/version.py
+++ b/spotinst_sdk2/version.py
@@ -1 +1 @@
-__version__ = '3.0.0'
+__version__ = '3.1.0'