diff --git a/README.md b/README.md index 12d8473d..530ecc1b 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,7 @@ Optionally, the module supports advanced security group management for the worke * [Submodules](./modules) * [fscloud](./modules/fscloud) * [kube-audit](./modules/kube-audit) + * [worker-pool](./modules/worker-pool) * [Examples](./examples) * [2 MZR clusters in same VPC example](./examples/multiple_mzr_clusters) * [Advanced example (mzr, auto-scale, kms, taints)](./examples/advanced) @@ -296,6 +297,7 @@ Optionally, you need the following permissions to attach Access Management tags | [cbr\_rule](#module\_cbr\_rule) | terraform-ibm-modules/cbr/ibm//modules/cbr-rule-module | 1.33.7 | | [cos\_instance](#module\_cos\_instance) | terraform-ibm-modules/cos/ibm | 10.5.1 | | [existing\_secrets\_manager\_instance\_parser](#module\_existing\_secrets\_manager\_instance\_parser) | terraform-ibm-modules/common-utilities/ibm//modules/crn-parser | 1.2.0 | +| [worker\_pools](#module\_worker\_pools) | ./modules/worker-pool | n/a | ### Resources @@ -308,8 +310,6 @@ Optionally, you need the following permissions to attach Access Management tags | [ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_cluster) | resource | | [ibm_container_vpc_cluster.cluster](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_cluster) | resource | | [ibm_container_vpc_cluster.cluster_with_upgrade](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_cluster) | resource | -| [ibm_container_vpc_worker_pool.autoscaling_pool](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_worker_pool) | resource | -| [ibm_container_vpc_worker_pool.pool](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_worker_pool) | resource | | [ibm_iam_authorization_policy.ocp_secrets_manager_iam_auth_policy](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/iam_authorization_policy) | resource | | [ibm_resource_tag.cluster_access_tag](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/resource_tag) | resource | | [ibm_resource_tag.cos_access_tag](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/resource_tag) | resource | @@ -322,7 +322,6 @@ Optionally, you need the following permissions to attach Access Management tags | [ibm_container_addons.existing_addons](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_addons) | data source | | [ibm_container_cluster_config.cluster_config](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_cluster_config) | data source | | [ibm_container_cluster_versions.cluster_versions](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_cluster_versions) | data source | -| [ibm_container_vpc_worker_pool.all_pools](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_vpc_worker_pool) | data source | | [ibm_is_lbs.all_lbs](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/is_lbs) | data source | | [ibm_is_virtual_endpoint_gateway.api_vpe](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/is_virtual_endpoint_gateway) | data source | | 
[ibm_is_virtual_endpoint_gateway.master_vpe](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/is_virtual_endpoint_gateway) | data source | diff --git a/examples/advanced/README.md b/examples/advanced/README.md index fb5cd8af..a1a18605 100644 --- a/examples/advanced/README.md +++ b/examples/advanced/README.md @@ -8,6 +8,7 @@ The following resources are provisioned by this example: - A VPC with subnets across 3 zones. - A public gateway for all the three zones - A multi-zone (3 zone) KMS encrypted OCP VPC cluster, with worker pools in each zone. +- An additional worker pool named `workerpool` is created and attached to the cluster using the `worker-pool` submodule. - Auto scaling enabled for the default worker pool. - Taints against the workers in zone-2 and zone-3. - Enable Kubernetes API server audit logs. diff --git a/examples/advanced/main.tf b/examples/advanced/main.tf index 1e7761e2..bf55dfee 100644 --- a/examples/advanced/main.tf +++ b/examples/advanced/main.tf @@ -152,6 +152,15 @@ locals { effect = "NoExecute" }] } + worker_pool = [ + { + subnet_prefix = "zone-1" + pool_name = "workerpool" + machine_type = "bx2.4x16" + operating_system = "REDHAT_8_64" + workers_per_zone = 2 + } + ] } module "ocp_base" { @@ -186,6 +195,19 @@ data "ibm_container_cluster_config" "cluster_config" { config_dir = "${path.module}/../../kubeconfig" } +######################################################################################################################## +# Worker Pool +######################################################################################################################## + +module "worker_pool" { + source = "../../modules/worker-pool" + resource_group_id = module.resource_group.resource_group_id + vpc_id = ibm_is_vpc.vpc.id + cluster_id = module.ocp_base.cluster_id + vpc_subnets = local.cluster_vpc_subnets + worker_pools = local.worker_pool +} + ######################################################################################################################## # Kube Audit ######################################################################################################################## diff --git a/main.tf b/main.tf index 9692659b..41d483d0 100644 --- a/main.tf +++ b/main.tf @@ -7,9 +7,6 @@ locals { # ibm_container_vpc_cluster automatically names default pool "default" (See https://github.com/IBM-Cloud/terraform-provider-ibm/issues/2849) default_pool = element([for pool in var.worker_pools : pool if pool.pool_name == "default"], 0) - # all_standalone_pools are the pools managed by a 'standalone' ibm_container_vpc_worker_pool resource - all_standalone_pools = [for pool in var.worker_pools : pool if !var.ignore_worker_pool_size_changes] - all_standalone_autoscaling_pools = [for pool in var.worker_pools : pool if var.ignore_worker_pool_size_changes] default_ocp_version = "${data.ibm_container_cluster_versions.cluster_versions.default_openshift_version}_openshift" ocp_version = var.ocp_version == null || var.ocp_version == "default" ? local.default_ocp_version : "${var.ocp_version}_openshift" @@ -466,114 +463,15 @@ data "ibm_container_cluster_config" "cluster_config" { endpoint_type = var.cluster_config_endpoint_type != "default" ? 
var.cluster_config_endpoint_type : null # null value represents default } -############################################################################## -# Worker Pools -############################################################################## - -locals { - additional_pool_names = var.ignore_worker_pool_size_changes ? [for pool in local.all_standalone_autoscaling_pools : pool.pool_name] : [for pool in local.all_standalone_pools : pool.pool_name] - pool_names = toset(flatten([["default"], local.additional_pool_names])) -} - -data "ibm_container_vpc_worker_pool" "all_pools" { - depends_on = [ibm_container_vpc_worker_pool.autoscaling_pool, ibm_container_vpc_worker_pool.pool] - for_each = local.pool_names - cluster = local.cluster_id - worker_pool_name = each.value -} - -resource "ibm_container_vpc_worker_pool" "pool" { - for_each = { for pool in local.all_standalone_pools : pool.pool_name => pool } - vpc_id = var.vpc_id - resource_group_id = var.resource_group_id - cluster = local.cluster_id - worker_pool_name = each.value.pool_name - flavor = each.value.machine_type - operating_system = each.value.operating_system - worker_count = each.value.workers_per_zone - secondary_storage = each.value.secondary_storage - entitlement = var.ocp_entitlement - labels = each.value.labels - crk = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.crk - kms_instance_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_instance_id - kms_account_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_account_id - - security_groups = each.value.additional_security_group_ids - - dynamic "zones" { - for_each = each.value.subnet_prefix != null ? var.vpc_subnets[each.value.subnet_prefix] : each.value.vpc_subnets - content { - subnet_id = zones.value.id - name = zones.value.zone - } - } - - # Apply taints to worker pools i.e. all_standalone_pools - dynamic "taints" { - for_each = var.worker_pools_taints == null ? [] : concat(var.worker_pools_taints["all"], lookup(var.worker_pools_taints, each.value["pool_name"], [])) - content { - effect = taints.value.effect - key = taints.value.key - value = taints.value.value - } - } - - timeouts { - # Extend create and delete timeout to 2h - delete = "2h" - create = "2h" - } - - # The default workerpool has to be imported as it will already exist on cluster create - import_on_create = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null - orphan_on_delete = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null -} - -# copy of the pool resource above which ignores changes to the worker pool for use in autoscaling scenarios -resource "ibm_container_vpc_worker_pool" "autoscaling_pool" { - for_each = { for pool in local.all_standalone_autoscaling_pools : pool.pool_name => pool } - vpc_id = var.vpc_id - resource_group_id = var.resource_group_id - cluster = local.cluster_id - worker_pool_name = each.value.pool_name - flavor = each.value.machine_type - operating_system = each.value.operating_system - worker_count = each.value.workers_per_zone - secondary_storage = each.value.secondary_storage - entitlement = var.ocp_entitlement - labels = each.value.labels - crk = each.value.boot_volume_encryption_kms_config == null ? 
null : each.value.boot_volume_encryption_kms_config.crk - kms_instance_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_instance_id - kms_account_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_account_id - - security_groups = each.value.additional_security_group_ids - - lifecycle { - ignore_changes = [worker_count] - } - - dynamic "zones" { - for_each = each.value.subnet_prefix != null ? var.vpc_subnets[each.value.subnet_prefix] : each.value.vpc_subnets - content { - subnet_id = zones.value.id - name = zones.value.zone - } - } - - # Apply taints to worker pools i.e. all_standalone_pools - - dynamic "taints" { - for_each = var.worker_pools_taints == null ? [] : concat(var.worker_pools_taints["all"], lookup(var.worker_pools_taints, each.value["pool_name"], [])) - content { - effect = taints.value.effect - key = taints.value.key - value = taints.value.value - } - } - - # The default workerpool has to be imported as it will already exist on cluster create - import_on_create = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null - orphan_on_delete = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null +module "worker_pools" { + source = "./modules/worker-pool" + vpc_id = var.vpc_id + resource_group_id = var.resource_group_id + cluster_id = local.cluster_id + vpc_subnets = var.vpc_subnets + worker_pools = var.worker_pools + ignore_worker_pool_size_changes = var.ignore_worker_pool_size_changes + allow_default_worker_pool_replacement = var.allow_default_worker_pool_replacement } ############################################################################## @@ -605,7 +503,7 @@ resource "null_resource" "confirm_network_healthy" { # Worker pool creation can start before the 'ibm_container_vpc_cluster' completes since there is no explicit # depends_on in 'ibm_container_vpc_worker_pool', just an implicit depends_on on the cluster ID. Cluster ID can exist before # 'ibm_container_vpc_cluster' completes, so hence need to add explicit depends on against 'ibm_container_vpc_cluster' here. - depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool] + depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools] provisioner "local-exec" { command = "${path.module}/scripts/confirm_network_healthy.sh" @@ -659,7 +557,7 @@ resource "ibm_container_addons" "addons" { # Worker pool creation can start before the 'ibm_container_vpc_cluster' completes since there is no explicit # depends_on in 'ibm_container_vpc_worker_pool', just an implicit depends_on on the cluster ID. Cluster ID can exist before # 'ibm_container_vpc_cluster' completes, so hence need to add explicit depends on against 'ibm_container_vpc_cluster' here. 
- depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy] + depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy] cluster = local.cluster_id resource_group_id = var.resource_group_id @@ -732,7 +630,7 @@ resource "kubernetes_config_map_v1_data" "set_autoscaling" { ############################################################################## data "ibm_is_lbs" "all_lbs" { - depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy] + depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy] count = length(var.additional_lb_security_group_ids) > 0 ? 1 : 0 } @@ -768,19 +666,19 @@ locals { data "ibm_is_virtual_endpoint_gateway" "master_vpe" { count = length(var.additional_vpe_security_group_ids["master"]) - depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy] + depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy] name = local.vpes_to_attach_to_sg["master"] } data "ibm_is_virtual_endpoint_gateway" "api_vpe" { count = length(var.additional_vpe_security_group_ids["api"]) - depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy] + depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy] name = local.vpes_to_attach_to_sg["api"] } data "ibm_is_virtual_endpoint_gateway" "registry_vpe" { count = length(var.additional_vpe_security_group_ids["registry"]) - depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy] + depends_on = 
[ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy] name = local.vpes_to_attach_to_sg["registry"] } @@ -872,7 +770,7 @@ module "existing_secrets_manager_instance_parser" { resource "ibm_iam_authorization_policy" "ocp_secrets_manager_iam_auth_policy" { count = var.enable_secrets_manager_integration && !var.skip_ocp_secrets_manager_iam_auth_policy ? 1 : 0 - depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool] + depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools] source_service_name = "containers-kubernetes" source_resource_instance_id = local.cluster_id target_service_name = "secrets-manager" diff --git a/modules/worker-pool/README.md b/modules/worker-pool/README.md new file mode 100644 index 00000000..a90d01c3 --- /dev/null +++ b/modules/worker-pool/README.md @@ -0,0 +1,127 @@ +# Worker pool module + +This module defines and manages worker pools for an IBM Cloud Openshift VPC cluster using the `ibm_container_vpc_worker_pool` resource. It provisions and configures standalone and autoscaling worker pools, handling both pools with optional taints, labels, and encryption configurations. + +## Usage + +``` +module "worker_pools" { + source = "terraform-ibm-modules/base-ocp-vpc/ibm//modules/worker-pool" + version = "X.Y.Z" # Replace "X.Y.Z" with a release version to lock into a specific release + vpc_id = "79cxxxx-xxxx-xxxx-xxxx-xxxxxXX8667" + resource_group_id = "xxXXxxXXxXxXXXXxxXxxxXXXXxXXXXX" + cluster_id = "xxXXxXXXxXxXXXXXxxxx" + vpc_subnets = { + zone-1 = [ + { + cidr_block = "192.168.32.0/22" + id = "0717-afc29fbb-0dbe-493a-a5b9-f3c5899cb8b9" + zone = "us-south-1" + }, + { + cidr_block = "192.168.36.0/22" + id = "0727-d65c1eda-9e38-4200-8452-cb8ff5bb3140" + zone = "us-south-2" + }, + { + cidr_block = "192.168.40.0/22" + id = "0737-9a823cd3-16bf-4ba4-a429-9e1fc7db74b8" + zone = "us-south-3" + } + ] + zone-2 = [ + { + cidr_block = "192.168.0.0/22" + id = "0717-846b9490-34ae-4a6c-8288-28112dca1ba3" + zone = "us-south-1" + }, + { + cidr_block = "192.168.4.0/22" + id = "0727-ef8db7f6-ffa5-4d8b-a317-4631741a45ee" + zone = "us-south-2" + }, + { + cidr_block = "192.168.8.0/22" + id = "0737-c9a6d871-d95b-4914-abf5-82c22f4161d1" + zone = "us-south-3" + } + ] + zone-3 = [ + { + cidr_block = "192.168.16.0/22" + id = "0717-d46e227c-89d4-4b02-9008-d03907a275b6" + zone = "us-south-1" + }, + { + cidr_block = "192.168.20.0/22" + id = "0727-93b1edcb-966c-4517-a7af-6ac63cd93adf" + zone = "us-south-2" + }, + { + cidr_block = "192.168.24.0/22" + id = "0737-807ec4f1-4d84-484e-b2f4-62dd5e431065" + zone = "us-south-3" + } + ] + } + worker_pools = [ + { + subnet_prefix = "default" + pool_name = "default" + machine_type = "bx2.4x16" + workers_per_zone = 2 + operating_system = "REDHAT_8_64" + } + ] + ignore_worker_pool_size_changes = false + allow_default_worker_pool_replacement = false +} +``` + +You need the following permissions to run this module. 
+ +- IAM Services + - **Kubernetes** service + - `Administrator` platform access + - `Manager` service access + + +### Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.9.0 | +| [ibm](#requirement\_ibm) | >= 1.78.2, < 2.0.0 | + +### Modules + +No modules. + +### Resources + +| Name | Type | +|------|------| +| [ibm_container_vpc_worker_pool.autoscaling_pool](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_worker_pool) | resource | +| [ibm_container_vpc_worker_pool.pool](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_worker_pool) | resource | +| [ibm_container_vpc_worker_pool.all_pools](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_vpc_worker_pool) | data source | + +### Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [allow\_default\_worker\_pool\_replacement](#input\_allow\_default\_worker\_pool\_replacement) | (Advanced users) Set to true to allow the module to recreate a default worker pool. If you wish to make any change to the default worker pool which requires the re-creation of the default pool follow these [steps](https://github.com/terraform-ibm-modules/terraform-ibm-base-ocp-vpc?tab=readme-ov-file#important-considerations-for-terraform-and-default-worker-pool). | `bool` | `false` | no | +| [cluster\_id](#input\_cluster\_id) | ID of the existing openshift cluster. | `string` | n/a | yes | +| [ignore\_worker\_pool\_size\_changes](#input\_ignore\_worker\_pool\_size\_changes) | Enable if using worker autoscaling. Stops Terraform managing worker count | `bool` | `false` | no | +| [ocp\_entitlement](#input\_ocp\_entitlement) | Value that is applied to the entitlements for OCP cluster provisioning | `string` | `null` | no | +| [resource\_group\_id](#input\_resource\_group\_id) | The ID of an existing IBM Cloud resource group where the cluster is grouped. | `string` | n/a | yes | +| [vpc\_id](#input\_vpc\_id) | ID of the VPC instance where this cluster is provisioned. | `string` | n/a | yes | +| [vpc\_subnets](#input\_vpc\_subnets) | Metadata that describes the VPC's subnets. Obtain this information from the VPC where this cluster is created. |
<pre>map(list(object({<br/>    id         = string<br/>    zone       = string<br/>    cidr_block = string<br/>  })))</pre> | n/a | yes |
+| [worker\_pools](#input\_worker\_pools) | List of worker pools | <pre>list(object({<br/>    subnet_prefix = optional(string)<br/>    vpc_subnets = optional(list(object({<br/>      id         = string<br/>      zone       = string<br/>      cidr_block = string<br/>    })))<br/>    pool_name         = string<br/>    machine_type      = string<br/>    workers_per_zone  = number<br/>    resource_group_id = optional(string)<br/>    operating_system  = string<br/>    labels            = optional(map(string))<br/>    minSize           = optional(number)<br/>    secondary_storage = optional(string)<br/>    maxSize           = optional(number)<br/>    enableAutoscaling = optional(bool)<br/>    boot_volume_encryption_kms_config = optional(object({<br/>      crk             = string<br/>      kms_instance_id = string<br/>      kms_account_id  = optional(string)<br/>    }))<br/>    additional_security_group_ids = optional(list(string))<br/>  }))</pre> | n/a | yes |
+| [worker\_pools\_taints](#input\_worker\_pools\_taints) | Optional, Map of lists containing node taints by node-pool name | `map(list(object({ key = string, value = string, effect = string })))` | `null` | no |
+
+### Outputs
+
+| Name | Description |
+|------|-------------|
+| [workerpools](#output\_workerpools) | Worker pools created |
+
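+The `workerpools` output returns the `ibm_container_vpc_worker_pool` data source details for every pool, keyed by pool name. As a minimal sketch (the output name below is illustrative, not part of this module), a calling configuration could surface the pool IDs like this:
+
+```
+output "worker_pool_ids" {
+  description = "IDs of all worker pools attached to the cluster"
+  value       = [for pool in module.worker_pools.workerpools : pool.id]
+}
+```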
diff --git a/modules/worker-pool/main.tf b/modules/worker-pool/main.tf
new file mode 100644
index 00000000..f92f0fdc
--- /dev/null
+++ b/modules/worker-pool/main.tf
@@ -0,0 +1,112 @@
+##############################################################################
+# Worker Pools
+##############################################################################
+
+locals {
+ # all_standalone_pools are the pools managed by a 'standalone' ibm_container_vpc_worker_pool resource
+ all_standalone_pools = [for pool in var.worker_pools : pool if !var.ignore_worker_pool_size_changes]
+ all_standalone_autoscaling_pools = [for pool in var.worker_pools : pool if var.ignore_worker_pool_size_changes]
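+ # pool_names feeds the data source below; "default" is always included because the cluster creates that pool automatically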
+ additional_pool_names = var.ignore_worker_pool_size_changes ? [for pool in local.all_standalone_autoscaling_pools : pool.pool_name] : [for pool in local.all_standalone_pools : pool.pool_name]
+ pool_names = toset(flatten([["default"], local.additional_pool_names]))
+}
+
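+# Read back every worker pool (default plus additional) after creation so the module can expose full pool details in its output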
+data "ibm_container_vpc_worker_pool" "all_pools" {
+ depends_on = [ibm_container_vpc_worker_pool.autoscaling_pool, ibm_container_vpc_worker_pool.pool]
+ for_each = local.pool_names
+ cluster = var.cluster_id
+ worker_pool_name = each.value
+}
+
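+# Standalone worker pools: Terraform manages worker_count directly (used when ignore_worker_pool_size_changes is false)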
+resource "ibm_container_vpc_worker_pool" "pool" {
+ for_each = { for pool in local.all_standalone_pools : pool.pool_name => pool }
+ vpc_id = var.vpc_id
+ resource_group_id = var.resource_group_id
+ cluster = var.cluster_id
+ worker_pool_name = each.value.pool_name
+ flavor = each.value.machine_type
+ operating_system = each.value.operating_system
+ worker_count = each.value.workers_per_zone
+ secondary_storage = each.value.secondary_storage
+ entitlement = var.ocp_entitlement
+ labels = each.value.labels
+ crk = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.crk
+ kms_instance_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_instance_id
+ kms_account_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_account_id
+
+ security_groups = each.value.additional_security_group_ids
+
+ dynamic "zones" {
+ for_each = each.value.subnet_prefix != null ? var.vpc_subnets[each.value.subnet_prefix] : each.value.vpc_subnets
+ content {
+ subnet_id = zones.value.id
+ name = zones.value.zone
+ }
+ }
+
+ # Apply taints to the standalone worker pools (all_standalone_pools)
+ dynamic "taints" {
+ for_each = var.worker_pools_taints == null ? [] : concat(var.worker_pools_taints["all"], lookup(var.worker_pools_taints, each.value["pool_name"], []))
+ content {
+ effect = taints.value.effect
+ key = taints.value.key
+ value = taints.value.value
+ }
+ }
+
+ timeouts {
+ # Extend create and delete timeout to 2h
+ delete = "2h"
+ create = "2h"
+ }
+
+ # The default worker pool already exists on cluster creation, so import it into state (and orphan it on delete) unless replacing the default pool is explicitly allowed
+ import_on_create = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null
+ orphan_on_delete = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null
+}
+
+# Copy of the pool resource above that ignores changes to worker_count, for use in autoscaling scenarios
+resource "ibm_container_vpc_worker_pool" "autoscaling_pool" {
+ for_each = { for pool in local.all_standalone_autoscaling_pools : pool.pool_name => pool }
+ vpc_id = var.vpc_id
+ resource_group_id = var.resource_group_id
+ cluster = var.cluster_id
+ worker_pool_name = each.value.pool_name
+ flavor = each.value.machine_type
+ operating_system = each.value.operating_system
+ worker_count = each.value.workers_per_zone
+ secondary_storage = each.value.secondary_storage
+ entitlement = var.ocp_entitlement
+ labels = each.value.labels
+ crk = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.crk
+ kms_instance_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_instance_id
+ kms_account_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_account_id
+
+ security_groups = each.value.additional_security_group_ids
+
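+ # The cluster autoscaler owns the worker count for these pools, so ignore drift in worker_count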
+ lifecycle {
+ ignore_changes = [worker_count]
+ }
+
+ dynamic "zones" {
+ for_each = each.value.subnet_prefix != null ? var.vpc_subnets[each.value.subnet_prefix] : each.value.vpc_subnets
+ content {
+ subnet_id = zones.value.id
+ name = zones.value.zone
+ }
+ }
+
+ # Apply taints to the autoscaling worker pools (all_standalone_autoscaling_pools)
+ dynamic "taints" {
+ for_each = var.worker_pools_taints == null ? [] : concat(var.worker_pools_taints["all"], lookup(var.worker_pools_taints, each.value["pool_name"], []))
+ content {
+ effect = taints.value.effect
+ key = taints.value.key
+ value = taints.value.value
+ }
+ }
+
+ # The default worker pool already exists on cluster creation, so import it into state (and orphan it on delete) unless replacing the default pool is explicitly allowed
+ import_on_create = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null
+ orphan_on_delete = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null
+}
diff --git a/modules/worker-pool/outputs.tf b/modules/worker-pool/outputs.tf
new file mode 100644
index 00000000..e096c2f0
--- /dev/null
+++ b/modules/worker-pool/outputs.tf
@@ -0,0 +1,4 @@
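+# Full details of every worker pool (default plus additional), as read back by the all_pools data source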
+output "workerpools" {
+ description = "Worker pools created"
+ value = data.ibm_container_vpc_worker_pool.all_pools
+}
diff --git a/modules/worker-pool/variables.tf b/modules/worker-pool/variables.tf
new file mode 100644
index 00000000..50663a2f
--- /dev/null
+++ b/modules/worker-pool/variables.tf
@@ -0,0 +1,77 @@
+
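+# Note: minSize, maxSize and enableAutoscaling are not referenced by the resources in this submodule. They are kept in
+# the worker_pools object type so the same value can also drive the cluster autoscaler configuration in the root module.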
+variable "worker_pools" {
+ type = list(object({
+ subnet_prefix = optional(string)
+ vpc_subnets = optional(list(object({
+ id = string
+ zone = string
+ cidr_block = string
+ })))
+ pool_name = string
+ machine_type = string
+ workers_per_zone = number
+ resource_group_id = optional(string)
+ operating_system = string
+ labels = optional(map(string))
+ minSize = optional(number)
+ secondary_storage = optional(string)
+ maxSize = optional(number)
+ enableAutoscaling = optional(bool)
+ boot_volume_encryption_kms_config = optional(object({
+ crk = string
+ kms_instance_id = string
+ kms_account_id = optional(string)
+ }))
+ additional_security_group_ids = optional(list(string))
+ }))
+ description = "List of worker pools"
+}
+
+variable "ignore_worker_pool_size_changes" {
+ type = bool
+ description = "Enable if using worker autoscaling. Stops Terraform managing worker count"
+ default = false
+}
+
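+# Example value (the "all" key applies to every pool and must be present whenever the map is set):
+# {
+#   all     = []
+#   default = [{ key = "dedicated", value = "default-pool", effect = "NoSchedule" }]
+# }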
+variable "worker_pools_taints" {
+ type = map(list(object({ key = string, value = string, effect = string })))
+ description = "Optional, Map of lists containing node taints by node-pool name"
+ default = null
+}
+
+variable "ocp_entitlement" {
+ type = string
+ description = "Value that is applied to the entitlements for OCP cluster provisioning"
+ default = null
+}
+
+variable "vpc_subnets" {
+ type = map(list(object({
+ id = string
+ zone = string
+ cidr_block = string
+ })))
+ description = "Metadata that describes the VPC's subnets. Obtain this information from the VPC where this cluster is created."
+}
+
+variable "allow_default_worker_pool_replacement" {
+ type = bool
+ description = "(Advanced users) Set to true to allow the module to recreate a default worker pool. If you wish to make any change to the default worker pool which requires the re-creation of the default pool follow these [steps](https://github.com/terraform-ibm-modules/terraform-ibm-base-ocp-vpc?tab=readme-ov-file#important-considerations-for-terraform-and-default-worker-pool)."
+ default = false
+ nullable = false
+}
+
+variable "cluster_id" {
+ type = string
+ description = "ID of the existing openshift cluster."
+}
+
+variable "resource_group_id" {
+ type = string
+ description = "The ID of an existing IBM Cloud resource group where the cluster is grouped."
+}
+
+variable "vpc_id" {
+ type = string
+ description = "ID of the VPC instance where this cluster is provisioned."
+}
diff --git a/modules/worker-pool/version.tf b/modules/worker-pool/version.tf
new file mode 100644
index 00000000..d073da59
--- /dev/null
+++ b/modules/worker-pool/version.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = ">= 1.9.0"
+ required_providers {
+ # Use "greater than or equal to" range in modules
+ ibm = {
+ source = "ibm-cloud/ibm"
+ version = ">= 1.78.2, < 2.0.0"
+ }
+ }
+}
diff --git a/moved.tf b/moved.tf
new file mode 100644
index 00000000..dee39191
--- /dev/null
+++ b/moved.tf
@@ -0,0 +1,9 @@
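+# Worker pools were previously defined directly in the root module and are now managed by the worker-pool submodule.
+# These moved blocks retain existing state so upgrades do not destroy and recreate the pools.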
+moved {
+ from = ibm_container_vpc_worker_pool.pool
+ to = module.worker_pools.ibm_container_vpc_worker_pool.pool
+}
+
+moved {
+ from = ibm_container_vpc_worker_pool.autoscaling_pool
+ to = module.worker_pools.ibm_container_vpc_worker_pool.autoscaling_pool
+}
diff --git a/outputs.tf b/outputs.tf
index 56c12af4..308e9354 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -22,7 +22,7 @@ output "cluster_crn" {
output "workerpools" {
description = "Worker pools created"
- value = data.ibm_container_vpc_worker_pool.all_pools
+ value = module.worker_pools.workerpools
}
output "ocp_version" {
diff --git a/tests/pr_test.go b/tests/pr_test.go
index 23cfaf89..cd93f311 100644
--- a/tests/pr_test.go
+++ b/tests/pr_test.go
@@ -99,6 +99,7 @@ func setupQuickstartOptions(t *testing.T, prefix string) *testschematic.TestSche
TarIncludePatterns: []string{
"*.tf",
quickStartTerraformDir + "/*.tf", "scripts/*.sh", "kubeconfig/README.md",
+ "modules/worker-pool/*.tf",
},
TemplateFolder: quickStartTerraformDir,
Tags: []string{"test-schematic"},
@@ -139,7 +140,7 @@ func TestRunFullyConfigurableInSchematics(t *testing.T) {
options := testschematic.TestSchematicOptionsDefault(&testschematic.TestSchematicOptions{
Testing: t,
Prefix: "ocp-fc",
- TarIncludePatterns: []string{"*.tf", fullyConfigurableTerraformDir + "/*.*", fullyConfigurableTerraformDir + "/scripts/*.*", "scripts/*.sh", "kubeconfig/README.md", "modules/kube-audit/*.*", "modules/kube-audit/kubeconfig/README.md", "modules/kube-audit/scripts/*.sh", fullyConfigurableTerraformDir + "/kubeconfig/README.md", "modules/kube-audit/helm-charts/kube-audit/*.*", "modules/kube-audit/helm-charts/kube-audit/templates/*.*"},
+ TarIncludePatterns: []string{"*.tf", fullyConfigurableTerraformDir + "/*.*", fullyConfigurableTerraformDir + "/scripts/*.*", "scripts/*.sh", "kubeconfig/README.md", "modules/kube-audit/*.*", "modules/worker-pool/*.tf", "modules/kube-audit/kubeconfig/README.md", "modules/kube-audit/scripts/*.sh", fullyConfigurableTerraformDir + "/kubeconfig/README.md", "modules/kube-audit/helm-charts/kube-audit/*.*", "modules/kube-audit/helm-charts/kube-audit/templates/*.*"},
TemplateFolder: fullyConfigurableTerraformDir,
Tags: []string{"test-schematic"},
DeleteWorkspaceOnFail: false,
@@ -176,7 +177,7 @@ func TestRunUpgradeFullyConfigurable(t *testing.T) {
options := testschematic.TestSchematicOptionsDefault(&testschematic.TestSchematicOptions{
Testing: t,
Prefix: "fc-upg",
- TarIncludePatterns: []string{"*.tf", fullyConfigurableTerraformDir + "/*.*", fullyConfigurableTerraformDir + "/scripts/*.*", "scripts/*.sh", "kubeconfig/README.md", "modules/kube-audit/*.*", "modules/kube-audit/kubeconfig/README.md", "modules/kube-audit/scripts/*.sh", fullyConfigurableTerraformDir + "/kubeconfig/README.md", "modules/kube-audit/helm-charts/kube-audit/*.*", "modules/kube-audit/helm-charts/kube-audit/templates/*.*"},
+ TarIncludePatterns: []string{"*.tf", fullyConfigurableTerraformDir + "/*.*", fullyConfigurableTerraformDir + "/scripts/*.*", "scripts/*.sh", "kubeconfig/README.md", "modules/kube-audit/*.*", "modules/kube-audit/kubeconfig/README.md", "modules/kube-audit/scripts/*.sh", fullyConfigurableTerraformDir + "/kubeconfig/README.md", "modules/kube-audit/helm-charts/kube-audit/*.*", "modules/kube-audit/helm-charts/kube-audit/templates/*.*", "modules/worker-pool/*.tf"},
TemplateFolder: fullyConfigurableTerraformDir,
Tags: []string{"test-schematic"},
DeleteWorkspaceOnFail: false,
@@ -216,7 +217,6 @@ func TestRunCustomsgExample(t *testing.T) {
CloudInfoService: sharedInfoSvc,
ImplicitDestroy: []string{
"module.ocp_base.null_resource.confirm_network_healthy",
- "module.ocp_base.null_resource.reset_api_key",
},
ImplicitRequired: false,
TerraformVars: map[string]interface{}{