diff --git a/google/_modules/gke/cluster.tf b/google/_modules/gke/cluster.tf
index 94acd35..9bdfc2c 100644
--- a/google/_modules/gke/cluster.tf
+++ b/google/_modules/gke/cluster.tf
@@ -74,15 +74,15 @@ resource "google_container_cluster" "current" {
     }
 
     dynamic "maintenance_exclusion" {
-      for_each = var.maintenance_exclusions
+      # Exclusion is optional: only emit the block when a start time is configured.
+      for_each = var.maintenance_exclusion_start_time != null ? [1] : []
 
       content {
-        start_time     = maintenance_exclusion.value.start_time
-        end_time       = maintenance_exclusion.value.end_time
-        exclusion_name = maintenance_exclusion.value.exclusion_name
+        start_time     = var.maintenance_exclusion_start_time
+        end_time       = var.maintenance_exclusion_end_time
+        exclusion_name = var.maintenance_exclusion_name
 
         exclusion_options {
-          scope = maintenance_exclusion.value.scope
+          scope = var.maintenance_exclusion_scope
         }
       }
     }
diff --git a/google/_modules/gke/node_pool/main.tf b/google/_modules/gke/node_pool/main.tf
index 8b90f45..9504cfa 100644
--- a/google/_modules/gke/node_pool/main.tf
+++ b/google/_modules/gke/node_pool/main.tf
@@ -14,6 +14,16 @@ resource "google_container_node_pool" "current" {
 
   node_locations = var.node_locations
 
+  # Optional per-pool network configuration (private nodes / pod range).
+  dynamic "network_config" {
+    for_each = var.network_config == null ? [] : [1]
+
+    content {
+      enable_private_nodes = var.network_config["enable_private_nodes"]
+      create_pod_range     = var.network_config["create_pod_range"]
+      pod_ipv4_cidr_block  = var.network_config["pod_ipv4_cidr_block"]
+    }
+  }
+
   #
   #
   # Node config
@@ -31,7 +41,7 @@ resource "google_container_node_pool" "current" {
 
     labels = merge(var.labels, var.metadata_labels)
 
-    tags = var.metadata_tags
+    tags = concat(var.metadata_tags, var.instance_tags)
 
     workload_metadata_config {
       mode = var.node_workload_metadata_config
diff --git a/google/_modules/gke/node_pool/variables.tf b/google/_modules/gke/node_pool/variables.tf
index df6fbaa..bda8659 100644
--- a/google/_modules/gke/node_pool/variables.tf
+++ b/google/_modules/gke/node_pool/variables.tf
@@ -119,6 +119,12 @@ variable "taints" {
   default     = null
 }
 
+variable "instance_tags" {
+  type        = list(string)
+  description = "List of instance tags to apply to nodes."
+  default     = []
+}
+
 variable "node_locations" {
   type        = list(string)
   description = "List of zones in the cluster's region to start worker nodes in. Defaults to cluster's node locations."
@@ -152,3 +158,13 @@ variable "labels" {
   description = "Kubernetes labels to set on the nodes created by the node pool. Merged with Kubestack default labels."
   default     = {}
 }
+
+variable "network_config" {
+  type = object({
+    enable_private_nodes = bool
+    create_pod_range     = bool
+    pod_ipv4_cidr_block  = string
+  })
+  description = "Additional network configuration for the node pool. Defaults to `null`."
+  default     = null
+}
diff --git a/google/_modules/gke/variables.tf b/google/_modules/gke/variables.tf
index dc25141..a06143e 100644
--- a/google/_modules/gke/variables.tf
+++ b/google/_modules/gke/variables.tf
@@ -53,14 +53,28 @@ variable "daily_maintenance_window_start_time" {
   description = "Start time of the daily maintenance window."
 }
 
-variable "maintenance_exclusions" {
-  type = list(object({
-    start_time     = string
-    end_time       = string
-    exclusion_name = string
-    scope          = string
-  }))
-  description = "List of maintenance exclusion configuration to be set on the cluster."
+variable "maintenance_exclusion_start_time" {
+  type        = string
+  description = "Maintenance exclusion start time. Defaults to `null` (no exclusion)."
+  default     = null
+}
+
+variable "maintenance_exclusion_end_time" {
+  type        = string
+  description = "Maintenance exclusion end time. Defaults to `null`."
+  default     = null
+}
+
+variable "maintenance_exclusion_name" {
+  type        = string
+  description = "Maintenance exclusion name. Defaults to `null`."
+  default     = null
+}
+
+variable "maintenance_exclusion_scope" {
+  type        = string
+  description = "Maintenance exclusion scope. Defaults to `null`."
+  default     = null
 }
diff --git a/google/cluster/configuration.tf b/google/cluster/configuration.tf
index 4fc9553..f33be75 100644
--- a/google/cluster/configuration.tf
+++ b/google/cluster/configuration.tf
@@ -30,7 +30,11 @@ locals {
     "cluster_daily_maintenance_window_start_time",
     "03:00",
   )
-  cluster_maintenance_exclusions = lookup(local.cfg, "cluster_maintenance_exclusions", [])
+
+  cluster_maintenance_exclusion_start_time = lookup(local.cfg, "cluster_maintenance_exclusion_start_time", null)
+  cluster_maintenance_exclusion_end_time   = lookup(local.cfg, "cluster_maintenance_exclusion_end_time", null)
+  cluster_maintenance_exclusion_name       = lookup(local.cfg, "cluster_maintenance_exclusion_name", null)
+  cluster_maintenance_exclusion_scope      = lookup(local.cfg, "cluster_maintenance_exclusion_scope", null)
 
   remove_default_node_pool = lookup(local.cfg, "remove_default_node_pool", true)
diff --git a/google/cluster/main.tf b/google/cluster/main.tf
index d71dee8..8c39748 100644
--- a/google/cluster/main.tf
+++ b/google/cluster/main.tf
@@ -27,7 +27,11 @@ module "cluster" {
   release_channel = local.cluster_release_channel
 
   daily_maintenance_window_start_time = local.cluster_daily_maintenance_window_start_time
-  maintenance_exclusions              = local.cluster_maintenance_exclusions
+
+  maintenance_exclusion_start_time = local.cluster_maintenance_exclusion_start_time
+  maintenance_exclusion_end_time   = local.cluster_maintenance_exclusion_end_time
+  maintenance_exclusion_name       = local.cluster_maintenance_exclusion_name
+  maintenance_exclusion_scope      = local.cluster_maintenance_exclusion_scope
 
   remove_default_node_pool = local.remove_default_node_pool
diff --git a/google/cluster/node-pool/configuration.tf b/google/cluster/node-pool/configuration.tf
index 012458b..523be54 100644
--- a/google/cluster/node-pool/configuration.tf
+++ b/google/cluster/node-pool/configuration.tf
@@ -42,5 +42,9 @@ locals {
 
   ephemeral_storage_local_ssd_config = local.cfg["ephemeral_storage_local_ssd_config"]
 
+  network_config = local.cfg["network_config"]
+
+  instance_tags = local.cfg["instance_tags"]
+
   guest_accelerator = local.cfg["guest_accelerator"]
 }
diff --git a/google/cluster/node-pool/main.tf b/google/cluster/node-pool/main.tf
index 23b0e80..923024e 100644
--- a/google/cluster/node-pool/main.tf
+++ b/google/cluster/node-pool/main.tf
@@ -30,7 +30,8 @@ module "node_pool" {
 
   node_workload_metadata_config = local.node_workload_metadata_config
 
-  taints = local.taints
+  taints        = local.taints
+  instance_tags = local.instance_tags
 
   labels = local.labels
 
@@ -40,4 +41,6 @@ module "node_pool" {
   ephemeral_storage_local_ssd_config = local.ephemeral_storage_local_ssd_config
 
   guest_accelerator = local.guest_accelerator
+
+  network_config = local.network_config
 }
diff --git a/google/cluster/node-pool/variables.tf b/google/cluster/node-pool/variables.tf
index 090a159..4172b2d 100644
--- a/google/cluster/node-pool/variables.tf
+++ b/google/cluster/node-pool/variables.tf
@@ -48,6 +48,14 @@ variable "configuration" {
         max_shared_clients_per_gpu = optional(number)
       }))
     }))
+
+    network_config = optional(object({
+      enable_private_nodes = bool
+      create_pod_range     = bool
+      pod_ipv4_cidr_block  = string
+    }))
+
+    instance_tags = optional(list(string))
   }))
   description = "Map with per workspace cluster configuration."
 }