-
Notifications
You must be signed in to change notification settings - Fork 1.3k
feat: Support for GKE private clusters without default node pool #2408
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from 4 commits
4eddbb7
d4de152
ea54925
93aeeea
a745e01
d719cc6
7161fb6
1f9c0d2
b2ac429
9bb1b1f
0e1d5bd
677fe4b
56759d1
170af42
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -45,6 +45,8 @@ resource "google_container_cluster" "primary" { | |
| } | ||
|
|
||
| {% if autopilot_cluster != true %} | ||
| initial_node_count = length(var.node_pools) == 0 ? var.initial_node_count : null | ||
|
|
||
| dynamic "network_policy" { | ||
| for_each = local.cluster_network_policy | ||
|
|
||
|
|
@@ -632,121 +634,124 @@ resource "google_container_cluster" "primary" { | |
| delete = lookup(var.timeouts, "delete", "45m") | ||
| } | ||
| {% if autopilot_cluster != true %} | ||
| node_pool { | ||
| name = "default-pool" | ||
| initial_node_count = var.initial_node_count | ||
|
|
||
| management { | ||
| auto_repair = lookup(var.cluster_autoscaling, "auto_repair", true) | ||
| auto_upgrade = lookup(var.cluster_autoscaling, "auto_upgrade", true) | ||
| } | ||
|
|
||
| node_config { | ||
| image_type = lookup(var.node_pools[0], "image_type", "COS_CONTAINERD") | ||
| machine_type = lookup(var.node_pools[0], "machine_type", "e2-medium") | ||
| min_cpu_platform = lookup(var.node_pools[0], "min_cpu_platform", "") | ||
| enable_confidential_storage = lookup(var.node_pools[0], "enable_confidential_storage", false) | ||
| disk_type = lookup(var.node_pools[0], "disk_type", null) | ||
| dynamic "gcfs_config" { | ||
| for_each = lookup(var.node_pools[0], "enable_gcfs", null) != null ? [var.node_pools[0].enable_gcfs] : [] | ||
| content { | ||
| enabled = gcfs_config.value | ||
| } | ||
| dynamic "node_pool" { | ||
| for_each = length(var.node_pools) == 0 ? [] : [1] | ||
| content { | ||
| name = "default-pool" | ||
| initial_node_count = var.initial_node_count | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The |
||
|
|
||
| management { | ||
| auto_repair = lookup(var.cluster_autoscaling, "auto_repair", true) | ||
| auto_upgrade = lookup(var.cluster_autoscaling, "auto_upgrade", true) | ||
| } | ||
|
|
||
| dynamic "gvnic" { | ||
| for_each = lookup(var.node_pools[0], "enable_gvnic", false) ? [true] : [] | ||
| content { | ||
| enabled = gvnic.value | ||
| node_config { | ||
| image_type = lookup(var.node_pools[0], "image_type", "COS_CONTAINERD") | ||
| machine_type = lookup(var.node_pools[0], "machine_type", "e2-medium") | ||
| min_cpu_platform = lookup(var.node_pools[0], "min_cpu_platform", "") | ||
| enable_confidential_storage = lookup(var.node_pools[0], "enable_confidential_storage", false) | ||
| disk_type = lookup(var.node_pools[0], "disk_type", null) | ||
| dynamic "gcfs_config" { | ||
| for_each = lookup(var.node_pools[0], "enable_gcfs", null) != null ? [var.node_pools[0].enable_gcfs] : [] | ||
| content { | ||
| enabled = gcfs_config.value | ||
| } | ||
| } | ||
| } | ||
|
|
||
| dynamic "fast_socket" { | ||
| for_each = lookup(var.node_pools[0], "enable_fast_socket", null) != null ? [var.node_pools[0].enable_fast_socket] : [] | ||
| content { | ||
| enabled = fast_socket.value | ||
| dynamic "gvnic" { | ||
| for_each = lookup(var.node_pools[0], "enable_gvnic", false) ? [true] : [] | ||
| content { | ||
| enabled = gvnic.value | ||
| } | ||
| } | ||
| } | ||
|
|
||
| dynamic "kubelet_config" { | ||
| for_each = length(setintersection( | ||
| keys(var.node_pools[0]), | ||
| ["cpu_manager_policy", "cpu_cfs_quota", "cpu_cfs_quota_period", "insecure_kubelet_readonly_port_enabled", "pod_pids_limit", "container_log_max_size", "container_log_max_files", "image_gc_low_threshold_percent", "image_gc_high_threshold_percent", "image_minimum_gc_age", "image_maximum_gc_age", "allowed_unsafe_sysctls"] | ||
| )) != 0 || var.insecure_kubelet_readonly_port_enabled != null ? [1] : [] | ||
| dynamic "fast_socket" { | ||
| for_each = lookup(var.node_pools[0], "enable_fast_socket", null) != null ? [var.node_pools[0].enable_fast_socket] : [] | ||
| content { | ||
| enabled = fast_socket.value | ||
| } | ||
| } | ||
|
|
||
| content { | ||
| cpu_manager_policy = lookup(var.node_pools[0], "cpu_manager_policy", "static") | ||
| cpu_cfs_quota = lookup(var.node_pools[0], "cpu_cfs_quota", null) | ||
| cpu_cfs_quota_period = lookup(var.node_pools[0], "cpu_cfs_quota_period", null) | ||
| insecure_kubelet_readonly_port_enabled = lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled) != null ? upper(tostring(lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled))) : null | ||
| pod_pids_limit = lookup(var.node_pools[0], "pod_pids_limit", null) | ||
| container_log_max_size = lookup(var.node_pools[0], "container_log_max_size", null) | ||
| container_log_max_files = lookup(var.node_pools[0], "container_log_max_files", null) | ||
| image_gc_low_threshold_percent = lookup(var.node_pools[0], "image_gc_low_threshold_percent", null) | ||
| image_gc_high_threshold_percent = lookup(var.node_pools[0], "image_gc_high_threshold_percent", null) | ||
| image_minimum_gc_age = lookup(var.node_pools[0], "image_minimum_gc_age", null) | ||
| image_maximum_gc_age = lookup(var.node_pools[0], "image_maximum_gc_age", null) | ||
| allowed_unsafe_sysctls = lookup(var.node_pools[0], "allowed_unsafe_sysctls", null) == null ? null : [for s in split(",", lookup(var.node_pools[0], "allowed_unsafe_sysctls", null)) : trimspace(s)] | ||
| dynamic "kubelet_config" { | ||
| for_each = length(setintersection( | ||
| keys(var.node_pools[0]), | ||
| ["cpu_manager_policy", "cpu_cfs_quota", "cpu_cfs_quota_period", "insecure_kubelet_readonly_port_enabled", "pod_pids_limit", "container_log_max_size", "container_log_max_files", "image_gc_low_threshold_percent", "image_gc_high_threshold_percent", "image_minimum_gc_age", "image_maximum_gc_age", "allowed_unsafe_sysctls"] | ||
| )) != 0 || var.insecure_kubelet_readonly_port_enabled != null ? [1] : [] | ||
|
|
||
| content { | ||
| cpu_manager_policy = lookup(var.node_pools[0], "cpu_manager_policy", "static") | ||
| cpu_cfs_quota = lookup(var.node_pools[0], "cpu_cfs_quota", null) | ||
| cpu_cfs_quota_period = lookup(var.node_pools[0], "cpu_cfs_quota_period", null) | ||
| insecure_kubelet_readonly_port_enabled = lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled) != null ? upper(tostring(lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled))) : null | ||
| pod_pids_limit = lookup(var.node_pools[0], "pod_pids_limit", null) | ||
| container_log_max_size = lookup(var.node_pools[0], "container_log_max_size", null) | ||
| container_log_max_files = lookup(var.node_pools[0], "container_log_max_files", null) | ||
| image_gc_low_threshold_percent = lookup(var.node_pools[0], "image_gc_low_threshold_percent", null) | ||
| image_gc_high_threshold_percent = lookup(var.node_pools[0], "image_gc_high_threshold_percent", null) | ||
| image_minimum_gc_age = lookup(var.node_pools[0], "image_minimum_gc_age", null) | ||
| image_maximum_gc_age = lookup(var.node_pools[0], "image_maximum_gc_age", null) | ||
| allowed_unsafe_sysctls = lookup(var.node_pools[0], "allowed_unsafe_sysctls", null) == null ? null : [for s in split(",", lookup(var.node_pools[0], "allowed_unsafe_sysctls", null)) : trimspace(s)] | ||
| } | ||
| } | ||
| } | ||
|
|
||
| dynamic "sole_tenant_config" { | ||
| # node_affinity is currently the only member of sole_tenant_config | ||
| for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [true] : [] | ||
| content { | ||
| dynamic "node_affinity" { | ||
| for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [lookup(var.node_pools[0], "node_affinity", null)] : [] | ||
| content { | ||
| key = lookup(jsondecode(node_affinity.value), "key", null) | ||
| operator = lookup(jsondecode(node_affinity.value), "operator", null) | ||
| values = lookup(jsondecode(node_affinity.value), "values", []) | ||
| dynamic "sole_tenant_config" { | ||
| # node_affinity is currently the only member of sole_tenant_config | ||
| for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [true] : [] | ||
| content { | ||
| dynamic "node_affinity" { | ||
| for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [lookup(var.node_pools[0], "node_affinity", null)] : [] | ||
| content { | ||
| key = lookup(jsondecode(node_affinity.value), "key", null) | ||
| operator = lookup(jsondecode(node_affinity.value), "operator", null) | ||
| values = lookup(jsondecode(node_affinity.value), "values", []) | ||
| } | ||
| } | ||
| } | ||
| } | ||
| } | ||
|
|
||
| service_account = lookup(var.node_pools[0], "service_account", local.service_account) | ||
| service_account = lookup(var.node_pools[0], "service_account", local.service_account) | ||
|
|
||
| tags = concat( | ||
| lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], | ||
| lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-default-pool"] : [], | ||
| lookup(local.node_pools_tags, "all", []), | ||
| lookup(local.node_pools_tags, var.node_pools[0].name, []), | ||
| ) | ||
| tags = concat( | ||
| lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], | ||
| lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-default-pool"] : [], | ||
| lookup(local.node_pools_tags, "all", []), | ||
| lookup(local.node_pools_tags, var.node_pools[0].name, []), | ||
| ) | ||
|
|
||
| logging_variant = lookup(var.node_pools[0], "logging_variant", "DEFAULT") | ||
| logging_variant = lookup(var.node_pools[0], "logging_variant", "DEFAULT") | ||
|
|
||
| dynamic "workload_metadata_config" { | ||
| for_each = local.cluster_node_metadata_config | ||
| dynamic "workload_metadata_config" { | ||
| for_each = local.cluster_node_metadata_config | ||
|
|
||
| content { | ||
| mode = workload_metadata_config.value.mode | ||
| content { | ||
| mode = workload_metadata_config.value.mode | ||
| } | ||
| } | ||
| } | ||
|
|
||
| metadata = local.node_pools_metadata["all"] | ||
| metadata = local.node_pools_metadata["all"] | ||
|
|
||
| {% if beta_cluster %} | ||
| dynamic "sandbox_config" { | ||
| for_each = tobool((lookup(var.node_pools[0], "sandbox_enabled", var.sandbox_enabled))) ? ["gvisor"] : [] | ||
| content { | ||
| sandbox_type = sandbox_config.value | ||
| {% if beta_cluster %} | ||
| dynamic "sandbox_config" { | ||
| for_each = tobool((lookup(var.node_pools[0], "sandbox_enabled", var.sandbox_enabled))) ? ["gvisor"] : [] | ||
| content { | ||
| sandbox_type = sandbox_config.value | ||
| } | ||
| } | ||
| } | ||
|
|
||
| {% endif %} | ||
| boot_disk_kms_key = lookup(var.node_pools[0], "boot_disk_kms_key", var.boot_disk_kms_key) | ||
| {% endif %} | ||
| boot_disk_kms_key = lookup(var.node_pools[0], "boot_disk_kms_key", var.boot_disk_kms_key) | ||
|
|
||
| storage_pools = lookup(var.node_pools[0], "storage_pools", null) != null ? [var.node_pools[0].storage_pools] : [] | ||
| storage_pools = lookup(var.node_pools[0], "storage_pools", null) != null ? [var.node_pools[0].storage_pools] : [] | ||
|
|
||
| shielded_instance_config { | ||
| enable_secure_boot = lookup(var.node_pools[0], "enable_secure_boot", false) | ||
| enable_integrity_monitoring = lookup(var.node_pools[0], "enable_integrity_monitoring", true) | ||
| } | ||
| shielded_instance_config { | ||
| enable_secure_boot = lookup(var.node_pools[0], "enable_secure_boot", false) | ||
| enable_integrity_monitoring = lookup(var.node_pools[0], "enable_integrity_monitoring", true) | ||
| } | ||
|
|
||
| local_ssd_encryption_mode = lookup(var.node_pools[0], "local_ssd_encryption_mode", null) | ||
| max_run_duration = lookup(var.node_pools[0], "max_run_duration", null) | ||
| flex_start = lookup(var.node_pools[0], "flex_start", null) | ||
| local_ssd_encryption_mode = lookup(var.node_pools[0], "local_ssd_encryption_mode", null) | ||
| max_run_duration = lookup(var.node_pools[0], "max_run_duration", null) | ||
| flex_start = lookup(var.node_pools[0], "flex_start", null) | ||
| } | ||
ciroclover marked this conversation as resolved.
Show resolved
Hide resolved
Comment on lines
+660
to
+766
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The logic for the dynamic `node_pool` block [inline code lost in extraction]: the configuration for this default node pool should not depend on `var.node_pools[0]` [remainder of sentence lost in extraction]. For example:
This applies to all references to `var.node_pools[0]`. |
||
| } | ||
| } | ||
| {% endif %} | ||
apeabody marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.