From 27e167dd8105b1a4afeeb9d25040d85be88333ec Mon Sep 17 00:00:00 2001
From: Paul Bastide
Date: Wed, 8 Nov 2023 12:28:21 -0500
Subject: [PATCH] OCTOPUS-532: refactored the resource group, increased the
 approve and issue sleep, and refactored the workers to align each one to the
 subnet in its zone using a greedy first match

Signed-off-by: Paul Bastide
---
 modules/0_vpc/create/create.tf            |  7 ++++++-
 modules/1_vpc_prepare/subnets.tf          |  1 -
 modules/6_worker/worker.tf                | 13 +++++++++----
 modules/7_post/files/approve_and_issue.sh |  5 +++--
 4 files changed, 18 insertions(+), 8 deletions(-)

diff --git a/modules/0_vpc/create/create.tf b/modules/0_vpc/create/create.tf
index e376cff..eb1fe72 100644
--- a/modules/0_vpc/create/create.tf
+++ b/modules/0_vpc/create/create.tf
@@ -3,10 +3,15 @@
 # SPDX-License-Identifier: Apache-2.0
 ################################################################
 
+# Dev Note: if the resource group id is not set explicitly, it defaults to the first entry in the ResourceGroups list, so resolve the group by name.
+data "ibm_resource_group" "group" {
+  name = var.vpc_resource_group
+}
+
 # Dev Note: the dns.enable_hub = false by default, we may consider in the future setting it so we don't
 # have to set a machineconfig with resolv.conf.d settings
 # Ref https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs/resources/is_vpc
 resource "ibm_is_vpc" "vpc" {
   name = var.vpc_name
-  resource_group = var.vpc_resource_group
+  resource_group = data.ibm_resource_group.group.id
 }
diff --git a/modules/1_vpc_prepare/subnets.tf b/modules/1_vpc_prepare/subnets.tf
index 020f521..2b2c75b 100644
--- a/modules/1_vpc_prepare/subnets.tf
+++ b/modules/1_vpc_prepare/subnets.tf
@@ -142,4 +142,3 @@ resource "ibm_is_subnet_public_gateway_attachment" "attach_pg_worker_zone_3" {
   subnet = ibm_is_subnet.subnet_worker_zone_3[0].id
   public_gateway = ibm_is_public_gateway.pg_worker_zone_3[0].id
 }
-
diff --git a/modules/6_worker/worker.tf b/modules/6_worker/worker.tf
index ed88a6c..73c39e4 100644
--- a/modules/6_worker/worker.tf
+++ b/modules/6_worker/worker.tf
@@ -14,8 +14,12 @@ data "ibm_is_subnets" "vpc_subnets" {
   routing_table_name = data.ibm_is_vpc.vpc.default_routing_table_name
 }
 
+# Dev Note: greedy search for the first subnet that matches each worker's zone.
 locals {
-  vpc_subnet_id = var.create_custom_subnet == true ? data.ibm_is_subnets.vpc_subnets.subnets[0].id : data.ibm_is_vpc.vpc.subnets[0].id
+  vpc_subnet_id    = var.create_custom_subnet == true ? data.ibm_is_subnets.vpc_subnets.subnets[0].id : data.ibm_is_vpc.vpc.subnets[0].id
+  subnet_for_zone1 = [for subnet in data.ibm_is_subnets.vpc_subnets.subnets : subnet.id if subnet.zone == var.worker_1["zone"]]
+  subnet_for_zone2 = [for subnet in data.ibm_is_subnets.vpc_subnets.subnets : subnet.id if subnet.zone == var.worker_2["zone"]]
+  subnet_for_zone3 = [for subnet in data.ibm_is_subnets.vpc_subnets.subnets : subnet.id if subnet.zone == var.worker_3["zone"]]
 }
 
 resource "ibm_is_instance" "workers_1" {
@@ -29,7 +33,8 @@ resource "ibm_is_instance" "workers_1" {
   resource_group = data.ibm_is_vpc.vpc.resource_group
 
   primary_network_interface {
-    subnet = local.vpc_subnet_id #data.ibm_is_vpc.vpc.subnets[0].id
+    subnet = local.subnet_for_zone1[0]
+    # previously: local.vpc_subnet_id
     security_groups = [var.target_worker_sg_id]
   }
 
@@ -51,7 +56,7 @@ resource "ibm_is_instance" "workers_2" {
   resource_group = data.ibm_is_vpc.vpc.resource_group
 
   primary_network_interface {
-    subnet = data.ibm_is_vpc.vpc.subnets[1].id
+    subnet = local.subnet_for_zone2[0]
     security_groups = [var.target_worker_sg_id]
   }
 
@@ -73,7 +78,7 @@ resource "ibm_is_instance" "workers_3" {
   resource_group = data.ibm_is_vpc.vpc.resource_group
 
   primary_network_interface {
-    subnet = data.ibm_is_vpc.vpc.subnets[2].id
+    subnet = local.subnet_for_zone3[0]
     security_groups = [var.target_worker_sg_id]
   }
 
diff --git a/modules/7_post/files/approve_and_issue.sh b/modules/7_post/files/approve_and_issue.sh
index 1b964ea..a9c9b66 100644
--- a/modules/7_post/files/approve_and_issue.sh
+++ b/modules/7_post/files/approve_and_issue.sh
@@ -67,8 +67,9 @@ do
   fi
 
   IDX=$(($IDX + 1))
-  # Wait for 10 seconds before we hammer the system
-  sleep 10
+  # Wait for 30 seconds so we don't hammer the system
+  echo "Sleeping before re-running - 30 seconds"
+  sleep 30
 
   # Re-read the 'Ready' count
   READY_COUNT=$(oc get nodes -l kubernetes.io/arch=amd64 | grep "${MACHINE_PREFIX}" | grep -v NotReady | grep -c Ready)
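
A note on the greedy subnet match introduced in modules/6_worker/worker.tf: local.subnet_for_zone1[0] (and the zone2/zone3 counterparts) fails with an index error at plan time if the VPC has no subnet in that worker's zone. As a minimal sketch of a friendlier guard, assuming Terraform >= 1.2 and reusing the locals added by this patch (this block is illustrative only and is not part of the commit):

  # Sketch only: would sit inside the existing ibm_is_instance.workers_1 resource.
  # Fails the plan with a readable message instead of an index-out-of-range error
  # when no subnet exists in the zone requested by var.worker_1["zone"].
  lifecycle {
    precondition {
      condition     = length(local.subnet_for_zone1) > 0
      error_message = "No VPC subnet found in the zone configured for worker_1."
    }
  }

The same precondition, pointed at local.subnet_for_zone2 and local.subnet_for_zone3, would cover the other two worker resources.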