diff --git a/modules/7_post/files/remove_lbs.sh b/modules/7_post/files/remove_lbs.sh new file mode 100644 index 0000000..21b756c --- /dev/null +++ b/modules/7_post/files/remove_lbs.sh @@ -0,0 +1,17 @@ +################################################################ +# Copyright 2023 - IBM Corporation. All rights reserved +# SPDX-License-Identifier: Apache-2.0 +################################################################ + +# The script restores the haproxy configuration from the backup created by update_lbs.sh. +if [ -f /etc/haproxy/haproxy.cfg.backup ] +then + echo "restoring haproxy" + mv -f /etc/haproxy/haproxy.cfg.backup /etc/haproxy/haproxy.cfg || true +fi + +echo "Restart haproxy" +sleep 10 +systemctl restart haproxy +echo "Done with the haproxy" \ No newline at end of file diff --git a/modules/7_post/files/update-lbs.sh b/modules/7_post/files/update-lbs.sh deleted file mode 100644 index da3c04f..0000000 --- a/modules/7_post/files/update-lbs.sh +++ /dev/null @@ -1,39 +0,0 @@ -################################################################ -# Copyright 2023 - IBM Corporation. All rights reserved -# SPDX-License-Identifier: Apache-2.0 -################################################################ - -# The script updates the haproxy entries for the new intel nodes. 
- -## Example -# backend ingress-http -# balance source -# server ZYZ-worker-0-http-router0 192.168.200.254:80 check -# server ZYZ-worker-1-http-router1 192.168.200.79:80 check -# server ZYZ-x86-worker-0-http-router2 10.245.0.45:80 check - -# backend ingress-https -# balance source -# server ZYZ-worker-0-https-router0 192.168.200.254:443 check -# server ZYZ-worker-1-https-router1 192.168.200.79:443 check -# server ZYZ-x86-worker-0-http-router2 10.245.0.45:443 check - -for INTEL_WORKER in $(oc get nodes -lkubernetes.io/arch=amd64 --no-headers=true -ojson | jq -c '.items[].status.addresses') -do - T_IP=$(echo "${INTEL_WORKER}" | jq -r '.[] | select(.type == "InternalIP").address') - T_HOSTNAME=$(echo "${INTEL_WORKER}" | jq -r '.[] | select(.type == "Hostname").address') - echo "FOUND: ${T_IP} ${T_HOSTNAME}" - - if grep ${T_HOSTNAME}-http-router0 /etc/haproxy/haproxy.cfg - then - continue - fi - HTTP_LN=$(grep -Rn -A3 'backend ingress-http$' /etc/haproxy/haproxy.cfg | grep 'server ' | head -n 1 | sed 's|-| |' | awk '{print $1}') - sed -i.bak "${HTTP_LN}i\ - server ${T_HOSTNAME}-http-router0 ${T_IP}:80 check -" /etc/haproxy/haproxy.cfg - HTTPS_LN=$(grep -Rn -A3 'backend ingress-https$' /etc/haproxy/haproxy.cfg | grep 'server ' | head -n 1 | sed 's|-| |' | awk '{print $1}') - sed -i.bak "${HTTPS_LN}i\ - server ${T_HOSTNAME}-https-router0 ${T_IP}:443 check -" /etc/haproxy/haproxy.cfg -done diff --git a/modules/7_post/files/update_lbs.sh b/modules/7_post/files/update_lbs.sh new file mode 100644 index 0000000..ba91e0b --- /dev/null +++ b/modules/7_post/files/update_lbs.sh @@ -0,0 +1,79 @@ +################################################################ +# Copyright 2023 - IBM Corporation. All rights reserved +# SPDX-License-Identifier: Apache-2.0 +################################################################ + +# The script updates the haproxy entries for the new Intel nodes. 
+ +## Example +# backend ingress-http +# balance source +# server ZYZ-worker-0-http-router0 192.168.200.254:80 check +# server ZYZ-worker-1-http-router1 192.168.200.79:80 check +# server ZYZ-x86-worker-0-http-router2 10.245.0.45:80 check + +# backend ingress-https +# balance source +# server ZYZ-worker-0-https-router0 192.168.200.254:443 check +# server ZYZ-worker-1-https-router1 192.168.200.79:443 check +# server ZYZ-x86-worker-0-http-router2 10.245.0.45:443 check + +# Alternatives: +# 1. Replicate ocp4-helper-node templates/haproxy.cfg.j2 and restart haproxy +# - replaces the existing haproxy.cfg with a new and potentially invalid cfg (as it may have changed since install). +# 2. sed / grep replacement which is a pain (see git history for this file) + +## Create the vars file +echo "Generate the configuration:" +cat << EOF > vars.yaml +--- +workers: +EOF +for INTEL_WORKER in $(oc get nodes -lkubernetes.io/arch=amd64 --no-headers=true -ojson | jq -c '.items[].status.addresses') +do + T_IP=$(echo "${INTEL_WORKER}" | jq -r '.[] | select(.type == "InternalIP").address') + T_HOSTNAME=$(echo "${INTEL_WORKER}" | jq -r '.[] | select(.type == "Hostname").address') + echo "FOUND: ${T_IP} ${T_HOSTNAME}" +cat << EOF >> vars.yaml + - { hostname: '${T_HOSTNAME}', ip: '${T_IP}' } +EOF +done + +# Backup the haproxy configuration +echo "Backing up prior configs" +mv /etc/haproxy/haproxy.cfg.backup /etc/haproxy/haproxy.cfg.backup-$(date +%s) || true +cp -f /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.backup + +echo "Make the inventory file" +cat << EOF > inventory +[vmhost] +localhost ansible_connection=local ansible_user=root +EOF + +echo "Creating the loadbalancer lb.yaml" +cat << EOF > lb.yaml +--- +- name: Create the Load Balancer Entries - http/https + hosts: all + tasks: + - name: create the http entries + ansible.builtin.replace: + path: /etc/haproxy/haproxy.cfg + regexp: ".*backend ingress-http\n.*balance source\n" + replace: "backend ingress-http\n balance source\n 
server {{ item.hostname }}-http-router0 {{ item.ip }}:80 check\n" + loop: "{{ workers }}" + - name: create the https entries + ansible.builtin.replace: + path: /etc/haproxy/haproxy.cfg + regexp: ".*backend ingress-https\n.*balance source\n" + replace: "backend ingress-https\n balance source\n server {{ item.hostname }}-https-router0 {{ item.ip }}:443 check\n" + loop: "{{ workers }}" +EOF + +echo "Running the haproxy changes" +ansible-playbook lb.yaml --extra-vars=@vars.yaml -i inventory + +echo "Restart haproxy" +sleep 10 +systemctl restart haproxy +echo "Done with the haproxy" \ No newline at end of file diff --git a/modules/7_post/post.tf b/modules/7_post/post.tf index 8263081..a263d9f 100644 --- a/modules/7_post/post.tf +++ b/modules/7_post/post.tf @@ -187,8 +187,58 @@ EOF } } -resource "null_resource" "updating_load_balancers" { +# Dev Note: only on destroy - restore the load balancers +resource "null_resource" "remove_lbs" { depends_on = [null_resource.patch_nfs_arch_ppc64le] + + triggers = { + count_1 = var.worker_1["count"] + count_2 = var.worker_2["count"] + count_3 = var.worker_3["count"] + user = var.rhel_username + timeout = "${var.connection_timeout}m" + name_prefix = "${var.name_prefix}" + private_key = file(var.private_key_file) + host = var.bastion_public_ip + agent = var.ssh_agent + ansible_post_path = local.ansible_post_path + } + + connection { + type = "ssh" + user = self.triggers.user + private_key = self.triggers.private_key + host = self.triggers.host + agent = self.triggers.agent + timeout = self.triggers.timeout + } + + provisioner "remote-exec" { + inline = [<