
Commit 6e70225

fix: secondary storage was not being provisioned, and the workload_cluster_name and management_cluster_name outputs were not working (#889)
1 parent 51b4d11 · commit 6e70225

9 files changed: 47 additions (+), 32 deletions (−)


README.md

Lines changed: 2 additions & 2 deletions
Large diffs are not rendered by default.

cluster.tf

Lines changed: 28 additions & 23 deletions
```diff
@@ -30,6 +30,7 @@ locals {
       cluster.name => {
         crn                 = cluster.crn
         id                  = cluster.id
+        cluster_name        = cluster.name
         resource_group_name = cluster.resource_group_name
         resource_group_id   = cluster.resource_group_id
         vpc_id              = cluster.vpc_id
@@ -45,6 +46,7 @@ locals {
       cluster.cluster_name => {
         crn               = cluster.cluster_crn
         id                = cluster.cluster_id
+        cluster_name      = cluster.cluster_name
         resource_group_id = cluster.resource_group_id
         vpc_id            = cluster.vpc_id
         region            = var.region
@@ -241,24 +243,26 @@ module "cluster" {
     for index, cluster in local.clusters_map : index => cluster
     if cluster.kube_type == "openshift"
   }
-  source            = "terraform-ibm-modules/base-ocp-vpc/ibm"
-  version           = "3.30.1"
-  resource_group_id = local.resource_groups[each.value.resource_group]
-  region            = var.region
-  cluster_name      = each.value.cluster_name
-  vpc_id            = each.value.vpc_id
-  ocp_entitlement   = each.value.entitlement
-  vpc_subnets       = each.value.vpc_subnets
-  access_tags       = each.value.access_tags
+  source             = "terraform-ibm-modules/base-ocp-vpc/ibm"
+  version            = "3.31.0"
+  resource_group_id  = local.resource_groups[each.value.resource_group]
+  region             = var.region
+  cluster_name       = each.value.cluster_name
+  vpc_id             = each.value.vpc_id
+  ocp_entitlement    = each.value.entitlement
+  vpc_subnets        = each.value.vpc_subnets
+  cluster_ready_when = var.wait_till
+  access_tags        = each.value.access_tags
   worker_pools = concat(
     [
       {
-        subnet_prefix    = each.value.subnet_names[0]
-        pool_name        = "default"
-        machine_type     = each.value.machine_type
-        workers_per_zone = each.value.workers_per_subnet
-        operating_system = each.value.operating_system
-        labels           = each.value.labels
+        subnet_prefix     = each.value.subnet_names[0]
+        pool_name         = "default"
+        machine_type      = each.value.machine_type
+        workers_per_zone  = each.value.workers_per_subnet
+        operating_system  = each.value.operating_system
+        labels            = each.value.labels
+        secondary_storage = each.value.secondary_storage
         boot_volume_encryption_kms_config = {
           crk             = each.value.boot_volume_crk_name == null ? null : regex("key:(.*)", module.key_management.key_map[each.value.boot_volume_crk_name].crn)[0]
           kms_instance_id = each.value.boot_volume_crk_name == null ? null : regex(".*:(.*):key:.*", module.key_management.key_map[each.value.boot_volume_crk_name].crn)[0]
@@ -269,12 +273,13 @@ module "cluster" {
       each.value.worker != null ? [
         for pool in each.value.worker :
         {
-          vpc_subnets      = pool.vpc_subnets
-          pool_name        = pool.name
-          machine_type     = pool.flavor
-          workers_per_zone = pool.workers_per_subnet
-          operating_system = pool.operating_system
-          labels           = pool.labels
+          vpc_subnets       = pool.vpc_subnets
+          pool_name         = pool.name
+          machine_type      = pool.flavor
+          workers_per_zone  = pool.workers_per_subnet
+          operating_system  = pool.operating_system
+          labels            = pool.labels
+          secondary_storage = pool.secondary_storage
           boot_volume_encryption_kms_config = {
             crk             = pool.boot_volume_crk_name == null ? null : regex("key:(.*)", module.key_management.key_map[pool.boot_volume_crk_name].crn)[0]
             kms_instance_id = pool.boot_volume_crk_name == null ? null : regex(".*:(.*):key:.*", module.key_management.key_map[pool.boot_volume_crk_name].crn)[0]
@@ -292,8 +297,8 @@ module "cluster" {
   use_existing_cos        = true
   existing_cos_id         = each.value.cos_instance_crn
   disable_public_endpoint = coalesce(each.value.disable_public_endpoint, true) # disable if not set or null
-  verify_worker_network_readiness = each.value.verify_worker_network_readiness
-  use_private_endpoint            = each.value.use_private_endpoint
+  verify_worker_network_readiness = each.value.verify_cluster_network_readiness
+  use_private_endpoint            = each.value.use_ibm_cloud_private_api_endpoints
   addons                              = each.value.addons
   manage_all_addons                   = each.value.manage_all_addons
   disable_outbound_traffic_protection = each.value.disable_outbound_traffic_protection
```
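
With this change, `secondary_storage` flows from each cluster definition into the default worker pool and every additional pool, and `cluster_ready_when` is now driven by `var.wait_till`. A minimal sketch of a `clusters` entry that exercises the new secondary-storage wiring — field names follow the schema shown in this commit, while the concrete values (and the `vpc_name` field) are illustrative:

```hcl
# Illustrative clusters entry; the storage tier comes from the override
# example in this commit, everything else is an example value.
clusters = [
  {
    name               = "workload-cluster"
    kube_type          = "openshift"
    resource_group     = "slz-workload-rg"
    vpc_name           = "workload" # assumed field, not shown in this diff
    subnet_names       = ["vsi-zone-1"]
    machine_type       = "bx2.16x64"
    workers_per_subnet = 1
    # Immutable after provisioning; forwarded to all worker pools above.
    secondary_storage = "300gb.5iops-tier"
  }
]
```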

examples/override-example/override.json

Lines changed: 2 additions & 0 deletions
```diff
@@ -18,6 +18,8 @@
       "name": "workload-cluster",
       "secondary_storage": "300gb.5iops-tier",
       "resource_group": "slz-workload-rg",
+      "use_ibm_cloud_private_api_endpoints": false,
+      "verify_cluster_network_readiness": false,
       "kms_config": {
         "crk_name": "slz-key",
         "private_endpoint": true
```

outputs.tf

Lines changed: 2 additions & 2 deletions
```diff
@@ -87,7 +87,7 @@ output "workload_cluster_id" {
 
 output "workload_cluster_name" {
   description = "The name of the workload cluster. If the cluster name does not exactly match the prefix-workload-cluster pattern it will be null."
-  value       = lookup(ibm_container_vpc_cluster.cluster, "${var.prefix}-workload-cluster", null) != null ? ibm_container_vpc_cluster.cluster["${var.prefix}-workload-cluster"].name : null
+  value       = lookup(local.cluster_data, "${var.prefix}-workload-cluster", null) != null ? local.cluster_data["${var.prefix}-workload-cluster"].cluster_name : null
 }
 
 output "management_cluster_id" {
@@ -97,7 +97,7 @@ output "management_cluster_id" {
 
 output "management_cluster_name" {
   description = "The name of the management cluster. If the cluster name does not exactly match the prefix-management-cluster pattern it will be null."
-  value       = lookup(ibm_container_vpc_cluster.cluster, "${var.prefix}-management-cluster", null) != null ? ibm_container_vpc_cluster.cluster["${var.prefix}-management-cluster"].name : null
+  value       = lookup(local.cluster_data, "${var.prefix}-management-cluster", null) != null ? local.cluster_data["${var.prefix}-management-cluster"].cluster_name : null
 }
 
 output "cluster_data" {
```

patterns/roks-quickstart/main.tf

Lines changed: 2 additions & 1 deletion
```diff
@@ -49,7 +49,8 @@ locals {
           "workers_per_subnet": 1,
           "entitlement": ${local.entitlement_val},
           "disable_public_endpoint": false,
-          "import_default_worker_pool_on_create" : false
+          "import_default_worker_pool_on_create" : false,
+          "use_ibm_cloud_private_api_endpoints": false
         }
       ],
       "cos": [
```

patterns/roks/main.tf

Lines changed: 1 addition & 0 deletions
```diff
@@ -33,6 +33,7 @@ module "roks_landing_zone" {
   ssh_public_key        = var.ssh_public_key
   existing_ssh_key_name = var.existing_ssh_key_name
   entitlement           = var.entitlement
+  secondary_storage     = var.secondary_storage
   workers_per_zone      = var.workers_per_zone
   flavor                = var.flavor
   kube_version          = var.kube_version
```

patterns/roks/module/config.tf

Lines changed: 2 additions & 2 deletions
```diff
@@ -106,8 +106,8 @@ locals {
       cluster_force_delete_storage = var.cluster_force_delete_storage
       operating_system             = var.operating_system
       kms_wait_for_apply           = var.kms_wait_for_apply
-      use_private_endpoint            = var.use_ibm_cloud_private_api_endpoints
-      verify_worker_network_readiness = var.verify_cluster_network_readiness
+      use_ibm_cloud_private_api_endpoints  = var.use_ibm_cloud_private_api_endpoints
+      verify_cluster_network_readiness     = var.verify_cluster_network_readiness
       import_default_worker_pool_on_create = false
       # By default, create dedicated pool for logging
       worker_pools = [
```

patterns/roks/variables.tf

Lines changed: 6 additions & 0 deletions
```diff
@@ -247,6 +247,12 @@ variable "entitlement" {
   default     = null
 }
 
+variable "secondary_storage" {
+  description = "Optionally specify a secondary storage option to attach to all cluster worker nodes. This value is immutable and can't be changed after provisioning. Use the IBM Cloud CLI command ibmcloud ks flavors to find valid options, e.g. ibmcloud ks flavor get --flavor bx2.16x64 --provider vpc-gen2 --zone us-south-1."
+  type        = string
+  default     = null
+}
+
 variable "cluster_addons" {
   type = object({
     debug-tool = optional(string)
```
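
Setting the new pattern-level variable is then a one-liner; the tier below is the value used in examples/override-example/override.json, and valid options can be listed with `ibmcloud ks flavors`, per the variable description:

```hcl
# Illustrative terraform.tfvars entry for the ROKS pattern:
secondary_storage = "300gb.5iops-tier"
```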

variables.tf

Lines changed: 2 additions & 2 deletions
```diff
@@ -856,8 +856,8 @@ variable "clusters" {
       cluster_force_delete_storage = optional(bool, false)  # force the removal of persistent storage associated with the cluster during cluster deletion
       operating_system             = optional(string, null) # The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
       kms_wait_for_apply           = optional(bool, true)   # make terraform wait until KMS is applied to master and it is ready and deployed
-      verify_worker_network_readiness = optional(bool, true) # Flag to run a script that runs kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
-      use_private_endpoint            = optional(bool, true) # Flag to force all cluster-related API calls to use the IBM Cloud private endpoints.
+      verify_cluster_network_readiness    = optional(bool, true) # Flag to run a script that runs kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
+      use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster-related API calls to use the IBM Cloud private endpoints.
       import_default_worker_pool_on_create  = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource.
       allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use this in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires re-creation of the default pool, set this variable to true.
       labels = optional(map(string)) # A map of labels that you want to add to the default worker pool.
```
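
Per the flags' inline comments, both should be disabled when the Terraform runtime can reach neither the cluster master (no kubectl access) nor the IBM Cloud private network. A hedged sketch of a cluster entry doing so, mirroring the override example earlier in this commit:

```hcl
# Disable the readiness check and private API endpoints for runtimes
# without access to the cluster master or the private network:
clusters = [
  {
    name                                = "workload-cluster"
    kube_type                           = "openshift"
    resource_group                      = "slz-workload-rg"
    verify_cluster_network_readiness    = false
    use_ibm_cloud_private_api_endpoints = false
  }
]
```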
