feat: Support for GKE private clusters without default node pool #2408
The first hunk adds a conditional cluster-level `initial_node_count` near the top of the resource:

```
@@ -45,6 +45,8 @@ resource "google_container_cluster" "primary" {
  }

{% if autopilot_cluster != true %}
  initial_node_count = var.remove_default_node_pool || length(var.node_pools) > 0 ? null : max(1, var.initial_node_count)

  dynamic "network_policy" {
    for_each = local.cluster_network_policy
```
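To see what the new expression evaluates to, here is a self-contained sketch with hypothetical variable defaults; drop it into a scratch directory and inspect with `terraform console` or `terraform plan`:

```hcl
# Standalone sketch of the new ternary (hypothetical defaults, not the
# module's real variable definitions).
variable "remove_default_node_pool" {
  type    = bool
  default = false
}

variable "initial_node_count" {
  type    = number
  default = 0
}

variable "node_pools" {
  type    = list(map(string))
  default = [] # no custom pools defined
}

locals {
  # Mirrors the expression added in this hunk: null when the default pool is
  # removed or custom pools exist, otherwise at least one node so cluster
  # creation can succeed.
  effective_initial_node_count = var.remove_default_node_pool || length(var.node_pools) > 0 ? null : max(1, var.initial_node_count)
}

output "effective_initial_node_count" {
  value = local.effective_initial_node_count # -> 1 with these defaults
}
```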
The second hunk wraps the previously unconditional default `node_pool` block in a `dynamic` block gated on the same variables:

```diff
@@ -644,121 +646,124 @@ resource "google_container_cluster" "primary" {
     delete = lookup(var.timeouts, "delete", "45m")
   }
 {% if autopilot_cluster != true %}
-  node_pool {
-    name               = "default-pool"
-    initial_node_count = var.initial_node_count
+  dynamic "node_pool" {
+    for_each = var.remove_default_node_pool || length(var.node_pools) == 0 ? [] : [1]
```
**Contributor** commented on the `for_each` line:

> The logic for creating the default node pool is split into two different paths based on whether `var.node_pools` is empty: when it is empty, the pool comes from the cluster-level `initial_node_count`; when it is not, this `dynamic "node_pool"` block creates it explicitly.
>
> This dual-path logic is complex and can lead to inconsistent configurations for the default node pool depending on the state of `var.node_pools`. Consider simplifying this by using a single, consistent mechanism. For example, you could always use this `dynamic "node_pool"` block to create the default pool.
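A minimal sketch of that single mechanism, assuming the suggestion means gating only on `var.remove_default_node_pool` (this is the reviewer's idea paraphrased, not code from the PR, and whether the provider accepts a null `initial_node_count` alongside an inline pool at creation would need verification):

```hcl
# Hypothetical single-path variant: the dynamic block is the only thing that
# ever creates "default-pool", regardless of whether custom pools are defined.
initial_node_count = null

dynamic "node_pool" {
  # Render the default pool unless it is explicitly removed.
  for_each = var.remove_default_node_pool ? [] : [1]
  content {
    name               = "default-pool"
    initial_node_count = max(1, var.initial_node_count)
    # ... management and node_config exactly as in the content block below ...
  }
}
```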
The rest of the pool definition moves into the `content` block, unchanged apart from re-indentation, with `initial_node_count` now clamped to `max(1, var.initial_node_count)`:

```
    content {
      name               = "default-pool"
      initial_node_count = max(1, var.initial_node_count)

      management {
        auto_repair  = lookup(var.cluster_autoscaling, "auto_repair", true)
        auto_upgrade = lookup(var.cluster_autoscaling, "auto_upgrade", true)
      }

      node_config {
        image_type                  = lookup(var.node_pools[0], "image_type", "COS_CONTAINERD")
        machine_type                = lookup(var.node_pools[0], "machine_type", "e2-medium")
        min_cpu_platform            = lookup(var.node_pools[0], "min_cpu_platform", "")
        enable_confidential_storage = lookup(var.node_pools[0], "enable_confidential_storage", false)
        disk_type                   = lookup(var.node_pools[0], "disk_type", null)

        dynamic "gcfs_config" {
          for_each = lookup(var.node_pools[0], "enable_gcfs", null) != null ? [var.node_pools[0].enable_gcfs] : []
          content {
            enabled = gcfs_config.value
          }
        }

        dynamic "gvnic" {
          for_each = lookup(var.node_pools[0], "enable_gvnic", false) ? [true] : []
          content {
            enabled = gvnic.value
          }
        }

        dynamic "fast_socket" {
          for_each = lookup(var.node_pools[0], "enable_fast_socket", null) != null ? [var.node_pools[0].enable_fast_socket] : []
          content {
            enabled = fast_socket.value
          }
        }

        dynamic "kubelet_config" {
          for_each = length(setintersection(
            keys(var.node_pools[0]),
            ["cpu_manager_policy", "cpu_cfs_quota", "cpu_cfs_quota_period", "insecure_kubelet_readonly_port_enabled", "pod_pids_limit", "container_log_max_size", "container_log_max_files", "image_gc_low_threshold_percent", "image_gc_high_threshold_percent", "image_minimum_gc_age", "image_maximum_gc_age", "allowed_unsafe_sysctls"]
          )) != 0 || var.insecure_kubelet_readonly_port_enabled != null ? [1] : []

          content {
            cpu_manager_policy                     = lookup(var.node_pools[0], "cpu_manager_policy", "static")
            cpu_cfs_quota                          = lookup(var.node_pools[0], "cpu_cfs_quota", null)
            cpu_cfs_quota_period                   = lookup(var.node_pools[0], "cpu_cfs_quota_period", null)
            insecure_kubelet_readonly_port_enabled = lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled) != null ? upper(tostring(lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled))) : null
            pod_pids_limit                         = lookup(var.node_pools[0], "pod_pids_limit", null)
            container_log_max_size                 = lookup(var.node_pools[0], "container_log_max_size", null)
            container_log_max_files                = lookup(var.node_pools[0], "container_log_max_files", null)
            image_gc_low_threshold_percent         = lookup(var.node_pools[0], "image_gc_low_threshold_percent", null)
            image_gc_high_threshold_percent        = lookup(var.node_pools[0], "image_gc_high_threshold_percent", null)
            image_minimum_gc_age                   = lookup(var.node_pools[0], "image_minimum_gc_age", null)
            image_maximum_gc_age                   = lookup(var.node_pools[0], "image_maximum_gc_age", null)
            allowed_unsafe_sysctls                 = lookup(var.node_pools[0], "allowed_unsafe_sysctls", null) == null ? null : [for s in split(",", lookup(var.node_pools[0], "allowed_unsafe_sysctls", null)) : trimspace(s)]
          }
        }

        dynamic "sole_tenant_config" {
          # node_affinity is currently the only member of sole_tenant_config
          for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [true] : []
          content {
            dynamic "node_affinity" {
              for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [lookup(var.node_pools[0], "node_affinity", null)] : []
              content {
                key      = lookup(jsondecode(node_affinity.value), "key", null)
                operator = lookup(jsondecode(node_affinity.value), "operator", null)
                values   = lookup(jsondecode(node_affinity.value), "values", [])
              }
            }
          }
        }

        service_account = lookup(var.node_pools[0], "service_account", local.service_account)

        tags = concat(
          lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [],
          lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-default-pool"] : [],
          lookup(local.node_pools_tags, "all", []),
          lookup(local.node_pools_tags, var.node_pools[0].name, []),
        )

        logging_variant = lookup(var.node_pools[0], "logging_variant", "DEFAULT")

        dynamic "workload_metadata_config" {
          for_each = local.cluster_node_metadata_config

          content {
            mode = workload_metadata_config.value.mode
          }
        }

        metadata = local.node_pools_metadata["all"]

{% if beta_cluster %}
        dynamic "sandbox_config" {
          for_each = tobool((lookup(var.node_pools[0], "sandbox_enabled", var.sandbox_enabled))) ? ["gvisor"] : []
          content {
            sandbox_type = sandbox_config.value
          }
        }
{% endif %}
        boot_disk_kms_key = lookup(var.node_pools[0], "boot_disk_kms_key", var.boot_disk_kms_key)

        storage_pools = lookup(var.node_pools[0], "storage_pools", null) != null ? [var.node_pools[0].storage_pools] : []

        shielded_instance_config {
          enable_secure_boot          = lookup(var.node_pools[0], "enable_secure_boot", false)
          enable_integrity_monitoring = lookup(var.node_pools[0], "enable_integrity_monitoring", true)
        }

        local_ssd_encryption_mode = lookup(var.node_pools[0], "local_ssd_encryption_mode", null)
        max_run_duration          = lookup(var.node_pools[0], "max_run_duration", null)
        flex_start                = lookup(var.node_pools[0], "flex_start", null)
      }
    }
  }
{% endif %}
```
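For context, the scenario in the PR title might be exercised roughly like this (a sketch only: the module source path and all argument values are illustrative, not taken from this PR):

```hcl
module "gke" {
  # Illustrative source; this PR changes the terraform-google-kubernetes-engine templates.
  source = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster"

  project_id        = "my-project"      # placeholder
  name              = "private-cluster" # placeholder
  region            = "us-central1"     # placeholder
  network           = "my-vpc"          # placeholder
  subnetwork        = "my-subnet"       # placeholder
  ip_range_pods     = "pods-range"      # placeholder
  ip_range_services = "services-range"  # placeholder

  enable_private_nodes     = true
  remove_default_node_pool = true # exercises the changed default-pool logic
  node_pools               = []   # no custom pools defined
}
```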
A second review comment, on the `initial_node_count` condition:

> The condition for `initial_node_count` is currently `var.remove_default_node_pool || length(var.node_pools) > 0 ? null : max(1, var.initial_node_count)`. This correctly sets `initial_node_count` to `null` if the default node pool is to be removed or if custom node pools are defined. However, if the `dynamic "node_pool"` block (lines 649-767) is intended to explicitly create the default node pool when `remove_default_node_pool` is false and `length(var.node_pools) == 0`, then this `initial_node_count` should also be `null` in that specific case to avoid conflicts or double creation. Otherwise, the implicit default node pool will still be created by GKE, potentially conflicting with the explicit `dynamic "node_pool"` block if its condition is also met.
>
> To ensure the `dynamic "node_pool"` block is the sole mechanism for creating a default node pool (when not explicitly removed and no custom node pools are defined), this line should be adjusted to always set `initial_node_count` to `null` if a default node pool is not desired implicitly, or if the explicit `dynamic "node_pool"` block is meant to handle it.