185 changes: 95 additions & 90 deletions autogen/main/cluster.tf.tmpl
@@ -45,6 +45,8 @@ resource "google_container_cluster" "primary" {
   }
 
   {% if autopilot_cluster != true %}
+  initial_node_count = length(var.node_pools) == 0 ? var.initial_node_count : null
+
   dynamic "network_policy" {
     for_each = local.cluster_network_policy
 
@@ -632,121 +634,124 @@ resource "google_container_cluster" "primary" {
     delete = lookup(var.timeouts, "delete", "45m")
   }
   {% if autopilot_cluster != true %}
-  node_pool {
-    name               = "default-pool"
-    initial_node_count = var.initial_node_count
-
-    management {
-      auto_repair  = lookup(var.cluster_autoscaling, "auto_repair", true)
-      auto_upgrade = lookup(var.cluster_autoscaling, "auto_upgrade", true)
-    }
-
-    node_config {
-      image_type                  = lookup(var.node_pools[0], "image_type", "COS_CONTAINERD")
-      machine_type                = lookup(var.node_pools[0], "machine_type", "e2-medium")
-      min_cpu_platform            = lookup(var.node_pools[0], "min_cpu_platform", "")
-      enable_confidential_storage = lookup(var.node_pools[0], "enable_confidential_storage", false)
-      disk_type                   = lookup(var.node_pools[0], "disk_type", null)
-      dynamic "gcfs_config" {
-        for_each = lookup(var.node_pools[0], "enable_gcfs", null) != null ? [var.node_pools[0].enable_gcfs] : []
-        content {
-          enabled = gcfs_config.value
-        }
-      }
-
-      dynamic "gvnic" {
-        for_each = lookup(var.node_pools[0], "enable_gvnic", false) ? [true] : []
-        content {
-          enabled = gvnic.value
-        }
-      }
-
-      dynamic "fast_socket" {
-        for_each = lookup(var.node_pools[0], "enable_fast_socket", null) != null ? [var.node_pools[0].enable_fast_socket] : []
-        content {
-          enabled = fast_socket.value
-        }
-      }
-
-      dynamic "kubelet_config" {
-        for_each = length(setintersection(
-          keys(var.node_pools[0]),
-          ["cpu_manager_policy", "cpu_cfs_quota", "cpu_cfs_quota_period", "insecure_kubelet_readonly_port_enabled", "pod_pids_limit", "container_log_max_size", "container_log_max_files", "image_gc_low_threshold_percent", "image_gc_high_threshold_percent", "image_minimum_gc_age", "image_maximum_gc_age", "allowed_unsafe_sysctls"]
-        )) != 0 || var.insecure_kubelet_readonly_port_enabled != null ? [1] : []
-
-        content {
-          cpu_manager_policy                     = lookup(var.node_pools[0], "cpu_manager_policy", "static")
-          cpu_cfs_quota                          = lookup(var.node_pools[0], "cpu_cfs_quota", null)
-          cpu_cfs_quota_period                   = lookup(var.node_pools[0], "cpu_cfs_quota_period", null)
-          insecure_kubelet_readonly_port_enabled = lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled) != null ? upper(tostring(lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled))) : null
-          pod_pids_limit                         = lookup(var.node_pools[0], "pod_pids_limit", null)
-          container_log_max_size                 = lookup(var.node_pools[0], "container_log_max_size", null)
-          container_log_max_files                = lookup(var.node_pools[0], "container_log_max_files", null)
-          image_gc_low_threshold_percent         = lookup(var.node_pools[0], "image_gc_low_threshold_percent", null)
-          image_gc_high_threshold_percent        = lookup(var.node_pools[0], "image_gc_high_threshold_percent", null)
-          image_minimum_gc_age                   = lookup(var.node_pools[0], "image_minimum_gc_age", null)
-          image_maximum_gc_age                   = lookup(var.node_pools[0], "image_maximum_gc_age", null)
-          allowed_unsafe_sysctls                 = lookup(var.node_pools[0], "allowed_unsafe_sysctls", null) == null ? null : [for s in split(",", lookup(var.node_pools[0], "allowed_unsafe_sysctls", null)) : trimspace(s)]
-        }
-      }
-
-      dynamic "sole_tenant_config" {
-        # node_affinity is currently the only member of sole_tenant_config
-        for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [true] : []
-        content {
-          dynamic "node_affinity" {
-            for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [lookup(var.node_pools[0], "node_affinity", null)] : []
-            content {
-              key      = lookup(jsondecode(node_affinity.value), "key", null)
-              operator = lookup(jsondecode(node_affinity.value), "operator", null)
-              values   = lookup(jsondecode(node_affinity.value), "values", [])
-            }
-          }
-        }
-      }
-
-      service_account = lookup(var.node_pools[0], "service_account", local.service_account)
-
-      tags = concat(
-        lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [],
-        lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-default-pool"] : [],
-        lookup(local.node_pools_tags, "all", []),
-        lookup(local.node_pools_tags, var.node_pools[0].name, []),
-      )
-
-      logging_variant = lookup(var.node_pools[0], "logging_variant", "DEFAULT")
-
-      dynamic "workload_metadata_config" {
-        for_each = local.cluster_node_metadata_config
-
-        content {
-          mode = workload_metadata_config.value.mode
-        }
-      }
-
-      metadata = local.node_pools_metadata["all"]
-
-      {% if beta_cluster %}
-      dynamic "sandbox_config" {
-        for_each = tobool((lookup(var.node_pools[0], "sandbox_enabled", var.sandbox_enabled))) ? ["gvisor"] : []
-        content {
-          sandbox_type = sandbox_config.value
-        }
-      }
-
-      {% endif %}
-      boot_disk_kms_key = lookup(var.node_pools[0], "boot_disk_kms_key", var.boot_disk_kms_key)
-
-      storage_pools = lookup(var.node_pools[0], "storage_pools", null) != null ? [var.node_pools[0].storage_pools] : []
-
-      shielded_instance_config {
-        enable_secure_boot          = lookup(var.node_pools[0], "enable_secure_boot", false)
-        enable_integrity_monitoring = lookup(var.node_pools[0], "enable_integrity_monitoring", true)
-      }
-
-      local_ssd_encryption_mode = lookup(var.node_pools[0], "local_ssd_encryption_mode", null)
-      max_run_duration          = lookup(var.node_pools[0], "max_run_duration", null)
-      flex_start                = lookup(var.node_pools[0], "flex_start", null)
-    }
-  }
+  dynamic "node_pool" {
+    for_each = length(var.node_pools) == 0 ? [] : [1]
+    content {
+      name               = "default-pool"
+      initial_node_count = var.initial_node_count
Review comment (Contributor, severity: high):

The initial_node_count for the default node pool should be at least 1 if autoscaling is not enabled. When var.initial_node_count is 0 (its default), this will be set to 0, which is likely invalid. This should be consistent with the top-level initial_node_count logic.

Suggested change:

      initial_node_count = max(1, var.initial_node_count)
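As a quick sanity check, a hypothetical standalone rendering of the suggested guard (assuming the 0 default described in the comment):

    variable "initial_node_count" {
      type    = number
      default = 0
    }

    output "default_pool_node_count" {
      # max(1, 0) = 1, so the default pool never starts with zero nodes.
      value = max(1, var.initial_node_count)
    }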


+
+      management {
+        auto_repair  = lookup(var.cluster_autoscaling, "auto_repair", true)
+        auto_upgrade = lookup(var.cluster_autoscaling, "auto_upgrade", true)
+      }
+
+      node_config {
+        image_type                  = lookup(var.node_pools[0], "image_type", "COS_CONTAINERD")
+        machine_type                = lookup(var.node_pools[0], "machine_type", "e2-medium")
+        min_cpu_platform            = lookup(var.node_pools[0], "min_cpu_platform", "")
+        enable_confidential_storage = lookup(var.node_pools[0], "enable_confidential_storage", false)
+        disk_type                   = lookup(var.node_pools[0], "disk_type", null)
+        dynamic "gcfs_config" {
+          for_each = lookup(var.node_pools[0], "enable_gcfs", null) != null ? [var.node_pools[0].enable_gcfs] : []
+          content {
+            enabled = gcfs_config.value
+          }
+        }
+
+        dynamic "gvnic" {
+          for_each = lookup(var.node_pools[0], "enable_gvnic", false) ? [true] : []
+          content {
+            enabled = gvnic.value
+          }
+        }
+
+        dynamic "fast_socket" {
+          for_each = lookup(var.node_pools[0], "enable_fast_socket", null) != null ? [var.node_pools[0].enable_fast_socket] : []
+          content {
+            enabled = fast_socket.value
+          }
+        }
+
+        dynamic "kubelet_config" {
+          for_each = length(setintersection(
+            keys(var.node_pools[0]),
+            ["cpu_manager_policy", "cpu_cfs_quota", "cpu_cfs_quota_period", "insecure_kubelet_readonly_port_enabled", "pod_pids_limit", "container_log_max_size", "container_log_max_files", "image_gc_low_threshold_percent", "image_gc_high_threshold_percent", "image_minimum_gc_age", "image_maximum_gc_age", "allowed_unsafe_sysctls"]
+          )) != 0 || var.insecure_kubelet_readonly_port_enabled != null ? [1] : []
+
+          content {
+            cpu_manager_policy                     = lookup(var.node_pools[0], "cpu_manager_policy", "static")
+            cpu_cfs_quota                          = lookup(var.node_pools[0], "cpu_cfs_quota", null)
+            cpu_cfs_quota_period                   = lookup(var.node_pools[0], "cpu_cfs_quota_period", null)
+            insecure_kubelet_readonly_port_enabled = lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled) != null ? upper(tostring(lookup(var.node_pools[0], "insecure_kubelet_readonly_port_enabled", var.insecure_kubelet_readonly_port_enabled))) : null
+            pod_pids_limit                         = lookup(var.node_pools[0], "pod_pids_limit", null)
+            container_log_max_size                 = lookup(var.node_pools[0], "container_log_max_size", null)
+            container_log_max_files                = lookup(var.node_pools[0], "container_log_max_files", null)
+            image_gc_low_threshold_percent         = lookup(var.node_pools[0], "image_gc_low_threshold_percent", null)
+            image_gc_high_threshold_percent        = lookup(var.node_pools[0], "image_gc_high_threshold_percent", null)
+            image_minimum_gc_age                   = lookup(var.node_pools[0], "image_minimum_gc_age", null)
+            image_maximum_gc_age                   = lookup(var.node_pools[0], "image_maximum_gc_age", null)
+            allowed_unsafe_sysctls                 = lookup(var.node_pools[0], "allowed_unsafe_sysctls", null) == null ? null : [for s in split(",", lookup(var.node_pools[0], "allowed_unsafe_sysctls", null)) : trimspace(s)]
+          }
+        }
+
+        dynamic "sole_tenant_config" {
+          # node_affinity is currently the only member of sole_tenant_config
+          for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [true] : []
+          content {
+            dynamic "node_affinity" {
+              for_each = lookup(var.node_pools[0], "node_affinity", null) != null ? [lookup(var.node_pools[0], "node_affinity", null)] : []
+              content {
+                key      = lookup(jsondecode(node_affinity.value), "key", null)
+                operator = lookup(jsondecode(node_affinity.value), "operator", null)
+                values   = lookup(jsondecode(node_affinity.value), "values", [])
+              }
+            }
+          }
+        }
+
+        service_account = lookup(var.node_pools[0], "service_account", local.service_account)
+
+        tags = concat(
+          lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [],
+          lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-default-pool"] : [],
+          lookup(local.node_pools_tags, "all", []),
+          lookup(local.node_pools_tags, var.node_pools[0].name, []),
+        )
+
+        logging_variant = lookup(var.node_pools[0], "logging_variant", "DEFAULT")
+
+        dynamic "workload_metadata_config" {
+          for_each = local.cluster_node_metadata_config
+
+          content {
+            mode = workload_metadata_config.value.mode
+          }
+        }
+
+        metadata = local.node_pools_metadata["all"]
+
+        {% if beta_cluster %}
+        dynamic "sandbox_config" {
+          for_each = tobool((lookup(var.node_pools[0], "sandbox_enabled", var.sandbox_enabled))) ? ["gvisor"] : []
+          content {
+            sandbox_type = sandbox_config.value
+          }
+        }
+
+        {% endif %}
+        boot_disk_kms_key = lookup(var.node_pools[0], "boot_disk_kms_key", var.boot_disk_kms_key)
+
+        storage_pools = lookup(var.node_pools[0], "storage_pools", null) != null ? [var.node_pools[0].storage_pools] : []
+
+        shielded_instance_config {
+          enable_secure_boot          = lookup(var.node_pools[0], "enable_secure_boot", false)
+          enable_integrity_monitoring = lookup(var.node_pools[0], "enable_integrity_monitoring", true)
+        }
+
+        local_ssd_encryption_mode = lookup(var.node_pools[0], "local_ssd_encryption_mode", null)
+        max_run_duration          = lookup(var.node_pools[0], "max_run_duration", null)
+        flex_start                = lookup(var.node_pools[0], "flex_start", null)
+      }
Comment on lines +660 to +766

Review comment (Contributor, severity: critical):

The logic for the dynamic node_pool block is to create a default node pool only when var.node_pools is empty. However, the node_config block for this default pool tries to access var.node_pools[0], which will cause a crash since the list is empty.

The configuration for this default node pool should not depend on var.node_pools[0]. You should use the default values directly, or introduce new variables for configuring the default node pool if customization is needed.

For example:

  • image_type = lookup(var.node_pools[0], "image_type", "COS_CONTAINERD") should become image_type = "COS_CONTAINERD".
  • machine_type = lookup(var.node_pools[0], "machine_type", "e2-medium") should become machine_type = "e2-medium".
  • service_account = lookup(var.node_pools[0], "service_account", local.service_account) should become service_account = local.service_account.

This applies to all references to var.node_pools[0] within this node_config block.
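A minimal sketch of what this suggestion could look like, assuming the intent is a fixed-default pool created only when var.node_pools is empty (abbreviated to a few representative attributes; the remaining node_config lookups would be replaced the same way):

    dynamic "node_pool" {
      for_each = length(var.node_pools) == 0 ? [1] : []
      content {
        name               = "default-pool"
        initial_node_count = max(1, var.initial_node_count)

        node_config {
          # Fixed defaults instead of lookups into the (possibly empty) var.node_pools list.
          image_type      = "COS_CONTAINERD"
          machine_type    = "e2-medium"
          service_account = local.service_account
        }
      }
    }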

+    }
+  }
   {% endif %}
4 changes: 2 additions & 2 deletions autogen/main/main.tf.tmpl
@@ -203,8 +203,8 @@ locals {

 {% if autopilot_cluster != true %}
   // node pool ID is in the form projects/<project-id>/locations/<location>/clusters/<cluster-name>/nodePools/<nodepool-name>
-  cluster_name_parts_from_nodepool = split("/", element(values(google_container_node_pool.pools)[*].id, 0))
-  cluster_name_computed            = element(local.cluster_name_parts_from_nodepool, length(local.cluster_name_parts_from_nodepool) - 3)
+  cluster_name_parts_from_nodepool = length(var.node_pools) == 0 ? [] : split("/", element(values(google_container_node_pool.pools)[*].id, 0))
+  cluster_name_computed            = length(var.node_pools) == 0 ? var.name : element(local.cluster_name_parts_from_nodepool, length(local.cluster_name_parts_from_nodepool) - 3)
 {% else %}
   // cluster ID is in the form project/location/name
   cluster_name_computed = element(split("/", local.cluster_id), length(split("/", local.cluster_id)) - 1)
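For illustration, the "- 3" index selects the cluster name from a node pool ID (hypothetical names):

    locals {
      # Follows the ID format noted in the comment above.
      example_id    = "projects/my-project/locations/us-central1/clusters/my-cluster/nodePools/default-pool"
      example_parts = split("/", local.example_id)
      # 8 parts, so element(..., 8 - 3) is parts[5], i.e. "my-cluster".
      example_name  = element(local.example_parts, length(local.example_parts) - 3)
    }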