18 commits
4eddbb7
feat: Support for GKE private clusters without default node pool
ciroclover Aug 15, 2025
d4de152
Run autogen and linter
ciroclover Aug 19, 2025
ea54925
Merge branch 'main' into private-cluster-node-pool
apeabody Sep 8, 2025
93aeeea
Merge branch 'main' into private-cluster-node-pool
apeabody Oct 30, 2025
a745e01
Simplify how cluster_name_computed is generated
ciroclover Oct 31, 2025
d719cc6
Merge branch 'main' into private-cluster-node-pool
apeabody Nov 17, 2025
7161fb6
Merge branch 'terraform-google-modules:main' into private-cluster-nod…
ciroclover Nov 18, 2025
1f9c0d2
Fix remove_default_node_pool conflict in node_pool generation
ciroclover Nov 18, 2025
b2ac429
Ensure that initial node count is greater than zero when autopilot i…
ciroclover Dec 9, 2025
9bb1b1f
Improve expression readability
ciroclover Dec 9, 2025
0e1d5bd
Merge branch 'main' into private-cluster-node-pool
ciroclover Dec 9, 2025
677fe4b
Fix logic for initial_node_count for non-autopilot clusters
ciroclover Dec 9, 2025
56759d1
Ensure initial_node_count compatibility with remove_default_node_pool
ciroclover Dec 9, 2025
170af42
resolve initial_node_count and default pool creation conflicts
ciroclover Dec 9, 2025
f9c0edd
resolve default node pool initial count configuration conflicts
ciroclover Dec 9, 2025
a28df9d
ensure initial node count is at least one for defaults
ciroclover Dec 10, 2025
d0c0041
Merge branch 'main' into private-cluster-node-pool
ciroclover Dec 10, 2025
02e0e2c
Remove modules/beta-autopilot-private-cluster/core
ciroclover Dec 10, 2025
185 changes: 95 additions & 90 deletions autogen/main/cluster.tf.tmpl
@@ -45,6 +45,8 @@ resource "google_container_cluster" "primary" {
}

{% if autopilot_cluster != true %}
initial_node_count = length(var.node_pools) == 0 ? var.initial_node_count : null

dynamic "network_policy" {
for_each = local.cluster_network_policy

@@ -632,121 +634,124 @@ resource "google_container_cluster" "primary" {
delete = lookup(var.timeouts, "delete", "45m")
}
{% if autopilot_cluster != true %}
node_pool {
name = "default-pool"
initial_node_count = var.initial_node_count

management {
auto_repair = lookup(var.cluster_autoscaling, "auto_repair", true)
auto_upgrade = lookup(var.cluster_autoscaling, "auto_upgrade", true)
}

node_config {
image_type = lookup(var.node_pools[0], "image_type", "COS_CONTAINERD")
machine_type = lookup(var.node_pools[0], "machine_type", "e2-medium")
min_cpu_platform = lookup(var.node_pools[0], "min_cpu_platform", "")
enable_confidential_storage = lookup(var.node_pools[0], "enable_confidential_storage", false)
disk_type = lookup(var.node_pools[0], "disk_type", null)
dynamic "gcfs_config" {
for_each = lookup(var.node_pools[0], "enable_gcfs", null) != null ? [var.node_pools[0].enable_gcfs] : []
content {
enabled = gcfs_config.value
}
dynamic "node_pool" {
for_each = length(var.node_pools) == 0 ? [] : [1]
content {
name = "default-pool"
initial_node_count = var.initial_node_count
Review comment (Contributor, severity: high):

The initial_node_count for the default node pool should be at least 1 if autoscaling is not enabled. When var.initial_node_count is 0 (its default), this will be set to 0, which is likely invalid. This should be consistent with the top-level initial_node_count logic.

Suggested change:

      initial_node_count = max(1, var.initial_node_count)
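
A minimal, self-contained sketch of the suggested guard (names are illustrative; the default of 0 is taken from the comment's premise, not verified against the module's variables.tf):

      # Illustrative sketch of the suggested guard, not the module's actual code.
      variable "initial_node_count" {
        type    = number
        default = 0 # the comment states 0 is the module default
      }

      locals {
        # max() clamps the request so the default pool never asks for zero nodes,
        # which the comment flags as likely invalid when autoscaling is disabled.
        default_pool_initial_node_count = max(1, var.initial_node_count)
      }

      output "default_pool_initial_node_count" {
        value = local.default_pool_initial_node_count # => 1 with the default of 0
      }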


management {
auto_repair = lookup(var.cluster_autoscaling, "auto_repair", true)
auto_upgrade = lookup(var.cluster_autoscaling, "auto_upgrade", true)
}

dynamic "gvnic" {
for_each = lookup(var.node_pools[0], "enable_gvnic", false) ? [true] : []
content {
enabled = gvnic.value
node_config {
image_type = lookup(var.node_pools[0], "image_type", "COS_CONTAINERD")
machine_type = lookup(var.node_pools[0], "machine_type", "e2-medium")
min_cpu_platform = lookup(var.node_pools[0], "min_cpu_platform", "")
enable_confidential_storage = lookup(var.node_pools[0], "enable_confidential_storage", false)
disk_type = lookup(var.node_pools[0], "disk_type", null)
dynamic "gcfs_config" {
for_each = lookup(var.node_pools[0], "enable_gcfs", null) != null ? [var.node_pools[0].enable_gcfs] : []
content {
enabled = gcfs_config.value
}
}
}

dynamic "fast_socket" {
for_each = lookup(var.node_pools[0], "enable_fast_socket", null) != null ? [var.node_pools[0].enable_fast_socket] : []
content {
enabled = fast_socket.value
dynamic "gvnic" {
for_each = lookup(var.node_pools[0], "enable_gvnic", false) ? [true] : []
content {
enabled = gvnic.value
}
}
}

dynamic "kubelet_config" {
for_each = length(setintersection(
keys(var.node_pools[0]),
["cpu_manager_policy", "cpu_cfs_quota", "cpu_cfs_quota_period", "insecure_kubelet_readonly_port_enabled", "pod_pids_limit", "container_log_max_size", "container_log_max_files", "image_gc_low_threshold_percent", "image_gc_high_threshold_percent", "image_minimum_gc_age", "image_maximum_gc_age", "allowed_unsafe_sysctls"]
)) != 0 || var.insecure_kubelet_readonly_port_enabled != null ? [1] : []
dynamic "fast_socket" {
for_each = lookup(var.node_pools[0], "enable_fast_socket", null) != null ? [var.node_pools[0].enable_fast_socket] : []
content {
enabled = fast_socket.value
}
}

content {
cpu_manager_policy = lookup(var.node_pools[0], "cpu_manager_policy", "static")
cpu_cfs_quota = lookup(var.node_pools[0], "cpu_cfs_quota", null)
cpu_cfs_quota_period = lookup(var.node_pools[0], "cpu_cfs_quota_period", null)
insecure_kubelet_readonly_port_enabled = lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled) != null ? upper(tostring(lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled))) : null
pod_pids_limit = lookup(var.node_pools[0], "pod_pids_limit", null)
container_log_max_size = lookup(var.node_pools[0], "container_log_max_size", null)
container_log_max_files = lookup(var.node_pools[0], "container_log_max_files", null)
image_gc_low_threshold_percent = lookup(var.node_pools[0], "image_gc_low_threshold_percent", null)
image_gc_high_threshold_percent = lookup(var.node_pools[0], "image_gc_high_threshold_percent", null)
image_minimum_gc_age = lookup(var.node_pools[0], "image_minimum_gc_age", null)
image_maximum_gc_age = lookup(var.node_pools[0], "image_maximum_gc_age", null)
allowed_unsafe_sysctls = lookup(var.node_pools[0], "allowed_unsafe_sysctls", null) == null ? null : [for s in split(",", lookup(var.node_pools[0], "allowed_unsafe_sysctls", null)) : trimspace(s)]
dynamic "kubelet_config" {
for_each = length(setintersection(
keys(var.node_pools[0]),
["cpu_manager_policy", "cpu_cfs_quota", "cpu_cfs_quota_period", "insecure_kubelet_readonly_port_enabled", "pod_pids_limit", "container_log_max_size", "container_log_max_files", "image_gc_low_threshold_percent", "image_gc_high_threshold_percent", "image_minimum_gc_age", "image_maximum_gc_age", "allowed_unsafe_sysctls"]
)) != 0 || var.insecure_kubelet_readonly_port_enabled != null ? [1] : []

content {
cpu_manager_policy = lookup(var.node_pools[0], "cpu_manager_policy", "static")
cpu_cfs_quota = lookup(var.node_pools[0], "cpu_cfs_quota", null)
cpu_cfs_quota_period = lookup(var.node_pools[0], "cpu_cfs_quota_period", null)
insecure_kubelet_readonly_port_enabled = lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled) != null ? upper(tostring(lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled))) : null
pod_pids_limit = lookup(var.node_pools[0], "pod_pids_limit", null)
container_log_max_size = lookup(var.node_pools[0], "container_log_max_size", null)
container_log_max_files = lookup(var.node_pools[0], "container_log_max_files", null)
image_gc_low_threshold_percent = lookup(var.node_pools[0], "image_gc_low_threshold_percent", null)
image_gc_high_threshold_percent = lookup(var.node_pools[0], "image_gc_high_threshold_percent", null)
image_minimum_gc_age = lookup(var.node_pools[0], "image_minimum_gc_age", null)
image_maximum_gc_age = lookup(var.node_pools[0], "image_maximum_gc_age", null)
allowed_unsafe_sysctls = lookup(var.node_pools[0], "allowed_unsafe_sysctls", null) == null ? null : [for s in split(",", lookup(var.node_pools[0], "allowed_unsafe_sysctls", null)) : trimspace(s)]
}
}
}

dynamic "sole_tenant_config" {
# node_affinity is currently the only member of sole_tenant_config
for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [true] : []
content {
dynamic "node_affinity" {
for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [lookup(var.node_pools[0], "node_affinity", null)] : []
content {
key = lookup(jsondecode(node_affinity.value), "key", null)
operator = lookup(jsondecode(node_affinity.value), "operator", null)
values = lookup(jsondecode(node_affinity.value), "values", [])
dynamic "sole_tenant_config" {
# node_affinity is currently the only member of sole_tenant_config
for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [true] : []
content {
dynamic "node_affinity" {
for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [lookup(var.node_pools[0], "node_affinity", null)] : []
content {
key = lookup(jsondecode(node_affinity.value), "key", null)
operator = lookup(jsondecode(node_affinity.value), "operator", null)
values = lookup(jsondecode(node_affinity.value), "values", [])
}
}
}
}
}

service_account = lookup(var.node_pools[0], "service_account", local.service_account)
service_account = lookup(var.node_pools[0], "service_account", local.service_account)

tags = concat(
lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [],
lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-default-pool"] : [],
lookup(local.node_pools_tags, "all", []),
lookup(local.node_pools_tags, var.node_pools[0].name, []),
)
tags = concat(
lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [],
lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-default-pool"] : [],
lookup(local.node_pools_tags, "all", []),
lookup(local.node_pools_tags, var.node_pools[0].name, []),
)

logging_variant = lookup(var.node_pools[0], "logging_variant", "DEFAULT")
logging_variant = lookup(var.node_pools[0], "logging_variant", "DEFAULT")

dynamic "workload_metadata_config" {
for_each = local.cluster_node_metadata_config
dynamic "workload_metadata_config" {
for_each = local.cluster_node_metadata_config

content {
mode = workload_metadata_config.value.mode
content {
mode = workload_metadata_config.value.mode
}
}
}

metadata = local.node_pools_metadata["all"]
metadata = local.node_pools_metadata["all"]

{% if beta_cluster %}
dynamic "sandbox_config" {
for_each = tobool((lookup(var.node_pools[0], "sandbox_enabled", var.sandbox_enabled))) ? ["gvisor"] : []
content {
sandbox_type = sandbox_config.value
{% if beta_cluster %}
dynamic "sandbox_config" {
for_each = tobool((lookup(var.node_pools[0], "sandbox_enabled", var.sandbox_enabled))) ? ["gvisor"] : []
content {
sandbox_type = sandbox_config.value
}
}
}

{% endif %}
boot_disk_kms_key = lookup(var.node_pools[0], "boot_disk_kms_key", var.boot_disk_kms_key)
{% endif %}
boot_disk_kms_key = lookup(var.node_pools[0], "boot_disk_kms_key", var.boot_disk_kms_key)

storage_pools = lookup(var.node_pools[0], "storage_pools", null) != null ? [var.node_pools[0].storage_pools] : []
storage_pools = lookup(var.node_pools[0], "storage_pools", null) != null ? [var.node_pools[0].storage_pools] : []

shielded_instance_config {
enable_secure_boot = lookup(var.node_pools[0], "enable_secure_boot", false)
enable_integrity_monitoring = lookup(var.node_pools[0], "enable_integrity_monitoring", true)
}
shielded_instance_config {
enable_secure_boot = lookup(var.node_pools[0], "enable_secure_boot", false)
enable_integrity_monitoring = lookup(var.node_pools[0], "enable_integrity_monitoring", true)
}

local_ssd_encryption_mode = lookup(var.node_pools[0], "local_ssd_encryption_mode", null)
max_run_duration = lookup(var.node_pools[0], "max_run_duration", null)
flex_start = lookup(var.node_pools[0], "flex_start", null)
local_ssd_encryption_mode = lookup(var.node_pools[0], "local_ssd_encryption_mode", null)
max_run_duration = lookup(var.node_pools[0], "max_run_duration", null)
flex_start = lookup(var.node_pools[0], "flex_start", null)
}
}
}
{% endif %}
4 changes: 1 addition & 3 deletions autogen/main/main.tf.tmpl
@@ -202,9 +202,7 @@ locals {
cluster_zones = sort(local.cluster_output_zones)

{% if autopilot_cluster != true %}
// node pool ID is in the form projects/<project-id>/locations/<location>/clusters/<cluster-name>/nodePools/<nodepool-name>
cluster_name_parts_from_nodepool = split("/", element(values(google_container_node_pool.pools)[*].id, 0))
cluster_name_computed = element(local.cluster_name_parts_from_nodepool, length(local.cluster_name_parts_from_nodepool) - 3)
cluster_name_computed = var.name
{% else %}
// cluster ID is in the form project/location/name
cluster_name_computed = element(split("/", local.cluster_id), length(split("/", local.cluster_id)) - 1)
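
For context on the simplification above: the removed expression derived the cluster name by splitting the first node pool's ID, which cannot work once a cluster is created with no node pools at all. A minimal sketch of the failure mode (illustrative values, not the module's code):

      # Illustrative only: the old derivation needs at least one node pool ID.
      variable "name" {
        type    = string
        default = "example-cluster" # hypothetical cluster name
      }

      locals {
        # With var.node_pools = [], values(google_container_node_pool.pools)[*].id
        # is an empty list, and element(local.pool_ids, 0) fails in Terraform
        # (element cannot be used with an empty list).
        pool_ids = []

        # The simplified form no longer depends on any node pool existing:
        cluster_name_computed = var.name
      }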