OCTOPUS-466: refactor the update lbs and remove lbs
Signed-off-by: Paul Bastide <[email protected]>
prb112 committed Nov 7, 2023
1 parent f095e36 commit fe20284
Showing 4 changed files with 150 additions and 43 deletions.
17 changes: 17 additions & 0 deletions modules/7_post/files/remove_lbs.sh
@@ -0,0 +1,17 @@
################################################################
# Copyright 2023 - IBM Corporation. All rights reserved
# SPDX-License-Identifier: Apache-2.0
################################################################

# The script restores the haproxy configuration, removing the entries added for the Intel nodes.
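# The backup restored below (/etc/haproxy/haproxy.cfg.backup) is the one
# update_lbs.sh takes before rewriting the config.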

if [ -f /etc/haproxy/haproxy.cfg.backup ]
then
    echo "restoring haproxy"
    mv -f /etc/haproxy/haproxy.cfg.backup /etc/haproxy/haproxy.cfg || true
fi
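# Optional pre-restart sanity check (a sketch, assuming the haproxy binary is on the PATH):
#   haproxy -c -f /etc/haproxy/haproxy.cfg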

echo "Restart haproxy"
sleep 10
systemctl restart haproxy
echo "Done with the haproxy"
39 changes: 0 additions & 39 deletions modules/7_post/files/update-lbs.sh

This file was deleted.

79 changes: 79 additions & 0 deletions modules/7_post/files/update_lbs.sh
@@ -0,0 +1,79 @@
################################################################
# Copyright 2023 - IBM Corporation. All rights reserved
# SPDX-License-Identifier: Apache-2.0
################################################################

# The script updates the haproxy entries for the new Intel nodes.

## Example
# backend ingress-http
# balance source
# server ZYZ-worker-0-http-router0 192.168.200.254:80 check
# server ZYZ-worker-1-http-router1 192.168.200.79:80 check
# server ZYZ-x86-worker-0-http-router2 10.245.0.45:80 check

# backend ingress-https
# balance source
# server ZYZ-worker-0-https-router0 192.168.200.254:443 check
# server ZYZ-worker-1-https-router1 192.168.200.79:443 check
# server ZYZ-x86-worker-0-https-router2 10.245.0.45:443 check

# Alternatives:
# 1. Replicate ocp4-helper-node templates/haproxy.cfg.j2 and restart haproxy
# - replaces the existing haproxy.cfg with a new and potentially invalid cfg (as it may have changed since install).
# 2. sed / grep replacement which is a pain (see git history for this file)

## Create the vars file
echo "Generate the configuration:"
cat << EOF > vars.yaml
---
workers:
EOF
for INTEL_WORKER in $(oc get nodes -lkubernetes.io/arch=amd64 --no-headers=true -ojson | jq -c '.items[].status.addresses')
do
    T_IP=$(echo "${INTEL_WORKER}" | jq -r '.[] | select(.type == "InternalIP").address')
    T_HOSTNAME=$(echo "${INTEL_WORKER}" | jq -r '.[] | select(.type == "Hostname").address')
    echo "FOUND: ${T_IP} ${T_HOSTNAME}"
    cat << EOF >> vars.yaml
- { hostname: '${T_HOSTNAME}', ip: '${T_IP}' }
EOF
done
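# For reference, each INTEL_WORKER parsed above is a node's .status.addresses
# array, shaped roughly like (illustrative values, not real output):
#   [{"type":"InternalIP","address":"10.245.0.45"},{"type":"Hostname","address":"zyz-x86-worker-0"}]
# which is why the jq selects filter on .type to pull out the IP and hostname.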

# Backup the haproxy configuration
echo "Backing up prior configs"
mv /etc/haproxy/haproxy.cfg.backup /etc/haproxy/haproxy.cfg.backup-$(date +%s) || true
cp -f /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.backup

echo "Make the inventory file"
cat << EOF > inventory
[vmhost]
localhost ansible_connection=local ansible_user=root
EOF

echo "Creating the loadbalancer lb.yaml"
cat << EOF > lb.yaml
---
- name: Create the Load Balancer Entries - http/https
  hosts: all
  tasks:
    - name: create the http entries
      ansible.builtin.replace:
        path: /etc/haproxy/haproxy.cfg
        regexp: ".*backend ingress-http\n.*balance source\n"
        replace: "backend ingress-http\n balance source\n server {{ item.hostname }}-http-router0 {{ item.ip }}:80 check\n"
      loop: "{{ workers }}"
    - name: create the https entries
      ansible.builtin.replace:
        path: /etc/haproxy/haproxy.cfg
        regexp: ".*backend ingress-https\n.*balance source\n"
        replace: "backend ingress-https\n balance source\n server {{ item.hostname }}-https-router0 {{ item.ip }}:443 check\n"
      loop: "{{ workers }}"
EOF
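# How the playbook works: each loop iteration matches the two-line backend
# header and re-emits it with one new server line appended; since the
# replacement still contains the header, the next iteration matches again,
# accumulating one server line per worker.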

echo "Running the haproxy changes"
ansible-playbook lb.yaml --extra-vars=@vars.yaml -i inventory

echo "Restart haproxy"
sleep 10
systemctl restart haproxy
echo "Done with the haproxy"
58 changes: 54 additions & 4 deletions modules/7_post/post.tf
@@ -187,8 +187,58 @@ EOF
}
}

resource "null_resource" "updating_load_balancers" {
# Dev Note: only on destroy - restore the load balancers
resource "null_resource" "remove_lbs" {
depends_on = [null_resource.patch_nfs_arch_ppc64le]

triggers = {
count_1 = var.worker_1["count"]
count_2 = var.worker_2["count"]
count_3 = var.worker_3["count"]
user = var.rhel_username
timeout = "${var.connection_timeout}m"
name_prefix = "${var.name_prefix}"
private_key = file(var.private_key_file)
host = var.bastion_public_ip
agent = var.ssh_agent
ansible_post_path = local.ansible_post_path
}
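  # Dev Note: destroy-time provisioners (and their connection blocks) may only
  # reference self, so the connection settings are captured in triggers above.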

  connection {
    type        = "ssh"
    user        = self.triggers.user
    private_key = self.triggers.private_key
    host        = self.triggers.host
    agent       = self.triggers.agent
    timeout     = self.triggers.timeout
  }

  provisioner "remote-exec" {
    inline = [<<EOF
mkdir -p /root/ocp4-upi-compute-powervs-ibmcloud/intel/lbs/
EOF
    ]
  }

  provisioner "file" {
    source      = "${path.module}/files/remove_lbs.sh"
    destination = "/root/ocp4-upi-compute-powervs-ibmcloud/intel/lbs/remove_lbs.sh"
  }

  provisioner "remote-exec" {
    when       = destroy
    on_failure = continue
    inline = [<<EOF
cd /root/ocp4-upi-compute-powervs-ibmcloud/intel/lbs/
bash remove_lbs.sh
EOF
    ]
  }
}


resource "null_resource" "updating_load_balancers" {
depends_on = [null_resource.patch_nfs_arch_ppc64le, null_resource.remove_lbs]
connection {
type = "ssh"
user = var.rhel_username
@@ -206,15 +206,15 @@
}

provisioner "file" {
source = "${path.module}/files/update-lbs.sh"
destination = "/root/ocp4-upi-compute-powervs-ibmcloud/intel/lbs/update-lbs.sh"
source = "${path.module}/files/update_lbs.sh"
destination = "/root/ocp4-upi-compute-powervs-ibmcloud/intel/lbs/update_lbs.sh"
}

# Dev Note: Updates the load balancers
provisioner "remote-exec" {
inline = [<<EOF
cd /root/ocp4-upi-compute-powervs-ibmcloud/intel/lbs/
bash update-lbs.sh
bash update_lbs.sh
EOF
]
}
