diff --git a/examples/hybrid/lb.tf b/examples/hybrid/lb.tf index eb2cc96..1f3052a 100644 --- a/examples/hybrid/lb.tf +++ b/examples/hybrid/lb.tf @@ -87,8 +87,13 @@ resource "aws_lb_target_group" "external-admin-api" { } locals { - target_groups = [ + target_group_cp = [ aws_lb_target_group.external-admin-api.arn, + aws_lb_target_group.internal-cluster.arn, + aws_lb_target_group.internal-telemetry.arn, + aws_lb_target_group.internal-admin-api.arn + ] + target_group_dp = [ aws_lb_target_group.external-proxy.arn ] } @@ -115,3 +120,91 @@ resource "aws_lb_listener" "admin" { } } +resource "aws_lb" "internal" { + + name = "kong-internal-lb" + internal = true + subnets = module.create_kong_dp.private_subnet_ids + load_balancer_type = "network" + idle_timeout = 60 + tags = var.tags +} + +resource "aws_lb_target_group" "internal-cluster" { + name = "internal-cluster-8005" + port = 8005 + protocol = "TCP" + vpc_id = aws_vpc.vpc.id + + health_check { + healthy_threshold = 5 + interval = 30 + port = 8005 + protocol = "TCP" + unhealthy_threshold = 5 + } +} + +resource "aws_lb_target_group" "internal-telemetry" { + name = "internal-telemetry-8006" + port = 8006 + protocol = "TCP" + vpc_id = aws_vpc.vpc.id + health_check { + healthy_threshold = 5 + interval = 30 + port = 8006 + protocol = "TCP" + unhealthy_threshold = 5 + } +} + +resource "aws_lb_target_group" "internal-admin-api" { + name = "internal-admin-api-8001" # FIX + port = 8001 + protocol = "TCP" + vpc_id = aws_vpc.vpc.id + health_check { + healthy_threshold = 5 + interval = 30 + port = 8001 + protocol = "TCP" + unhealthy_threshold = 5 + } +} + +resource "aws_lb_listener" "cluster" { + + load_balancer_arn = aws_lb.internal.arn + port = 8005 + protocol = "TCP" + + default_action { + target_group_arn = aws_lb_target_group.internal-cluster.arn + type = "forward" + } +} + +resource "aws_lb_listener" "telemetry" { + + load_balancer_arn = aws_lb.internal.arn + port = 8006 + protocol = "TCP" + + default_action { + 
target_group_arn = aws_lb_target_group.internal-telemetry.arn + type = "forward" + } +} + +resource "aws_lb_listener" "internal-admin" { + + load_balancer_arn = aws_lb.internal.arn + port = 8001 + protocol = "TCP" + + default_action { + target_group_arn = aws_lb_target_group.internal-admin-api.arn + type = "forward" + } +} diff --git a/examples/hybrid/main.tf b/examples/hybrid/main.tf index c3ad1cb..9a13426 100644 --- a/examples/hybrid/main.tf +++ b/examples/hybrid/main.tf @@ -32,11 +32,42 @@ resource "aws_eip" "nat_eip" { depends_on = [aws_internet_gateway.ig] } +resource "aws_security_group" "allow_postgres" { + name = "allow_postgres" + description = "Allow postgres inbound traffic" + vpc_id = aws_vpc.vpc.id + + ingress { + description = "postgresql from VPC" + from_port = 5432 + to_port = 5432 + protocol = "TCP" + cidr_blocks = [aws_vpc.vpc.cidr_block] + } + + ingress { + description = "ssh from anywhere" + from_port = 22 + to_port = 22 + protocol = "TCP" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = var.tags +} + resource "aws_subnet" "public_subnets" { - count = length(module.create_kong_asg.private_subnet_azs) + count = length(module.create_kong_cp.private_subnet_azs) vpc_id = aws_vpc.vpc.id cidr_block = "10.0.${4 + count.index}.0/24" - availability_zone = module.create_kong_asg.private_subnet_azs[count.index] + availability_zone = module.create_kong_cp.private_subnet_azs[count.index] map_public_ip_on_launch = true } @@ -66,18 +97,46 @@ resource "aws_route_table_association" "public" { route_table_id = aws_route_table.public.id } -module "create_kong_asg" { - source = "../../" +locals { + + kong_control_plane_config = { + "KONG_ROLE" = "control_plane" + "KONG_PROXY_LISTEN" = "off" + "KONG_ANONYMOUS_REPORTS" = "off" + "KONG_PORTAL" = "on" + "KONG_VITALS" = "on" + "KONG_AUDIT_LOG" = "on" + "KONG_LOG_LEVEL" = "debug" + } + + kong_data_plane_config = { + "KONG_ROLE" =
"data_plane" + "KONG_DATABASE" = "off" + "KONG_LOG_LEVEL" = "debug" + "KONG_ANONYMOUS_REPORTS" = "off" + } + + kong_hybrid_conf = { + cluster_cert = tls_locally_signed_cert.cert.cert_pem + cluster_key = tls_private_key.cert.private_key_pem + endpoint = aws_lb.internal.dns_name + } +} + +module "create_kong_cp" { + source = "../../" + + instance_type = var.instance_type vpc_id = aws_vpc.vpc.id ami_id = data.aws_ami.ubuntu.id key_name = var.key_name region = var.region vpc_cidr_block = aws_vpc.vpc.cidr_block - environment = var.environment - service = var.service - description = var.description iam_instance_profile_name = aws_iam_instance_profile.kong.name - asg_desired_capacity = var.asg_desired_capacity + + asg_desired_capacity = var.asg_desired_capacity + asg_max_size = var.asg_max_size + asg_min_size = var.asg_min_size postgres_config = { master_user = var.postgres_master_user @@ -90,9 +149,47 @@ module "create_kong_asg" { password = var.kong_database_password } - target_group_arns = local.target_groups + target_group_arns = local.target_group_cp - tags = var.tags + kong_config = local.kong_control_plane_config + kong_hybrid_conf = local.kong_hybrid_conf + + environment = var.environment + service = var.service + description = var.description + tags = var.tags +} + +module "create_kong_dp" { + source = "../../" + + instance_type = var.instance_type + vpc_id = aws_vpc.vpc.id + ami_id = data.aws_ami.ubuntu.id + key_name = var.key_name + region = var.region + vpc_cidr_block = aws_vpc.vpc.cidr_block + + iam_instance_profile_name = aws_iam_instance_profile.kong.name + + + asg_desired_capacity = var.asg_desired_capacity + asg_max_size = var.asg_max_size + asg_min_size = var.asg_min_size + + target_group_arns = local.target_group_dp + + skip_rds_creation = true + kong_config = local.kong_data_plane_config + kong_hybrid_conf = local.kong_hybrid_conf + + private_subnets = module.create_kong_cp.private_subnet_ids + availability_zones = 
module.create_kong_cp.private_subnet_azs + + environment = var.environment + service = var.service + description = var.description + tags = var.tags } resource "aws_route_table" "private" { @@ -106,7 +203,7 @@ resource "aws_route" "private_nat_gateway" { } resource "aws_route_table_association" "private" { - count = length(module.create_kong_asg.private_subnet_ids) - subnet_id = element(module.create_kong_asg.private_subnet_ids, count.index) + count = length(module.create_kong_cp.private_subnet_ids) + subnet_id = element(module.create_kong_cp.private_subnet_ids, count.index) route_table_id = aws_route_table.private.id } diff --git a/examples/hybrid/outputs.tf b/examples/hybrid/outputs.tf index 1b37bf8..386a540 100644 --- a/examples/hybrid/outputs.tf +++ b/examples/hybrid/outputs.tf @@ -1,6 +1,8 @@ locals { proxy = "http://${aws_lb.external.dns_name}:8000" admin_api = "http://${aws_lb.external.dns_name}:8001" + cluster = "http://${aws_lb.internal.dns_name}:8005" + telemetry = "http://${aws_lb.internal.dns_name}:8006" } output "kong-proxy-endpoint" { @@ -10,3 +12,11 @@ output "kong-proxy-endpoint" { output "kong-api-endpoint" { value = local.admin_api } + +output "kong-cluster-endpoint" { + value = local.cluster +} + +output "kong-telemetry-endpoint" { + value = local.telemetry +} diff --git a/examples/hybrid/tls_shared.tf b/examples/hybrid/tls_shared.tf new file mode 100644 index 0000000..f5c1820 --- /dev/null +++ b/examples/hybrid/tls_shared.tf @@ -0,0 +1,50 @@ +# generate certificates for Kong +resource "tls_private_key" "ca" { + algorithm = "ECDSA" + ecdsa_curve = "P384" +} + +resource "tls_self_signed_cert" "ca" { + key_algorithm = tls_private_key.ca.algorithm + private_key_pem = tls_private_key.ca.private_key_pem + is_ca_certificate = true + + validity_period_hours = "12" + allowed_uses = [ + "cert_signing", + "key_encipherment", + "digital_signature", + ] + + subject { + common_name = "kong_clustering" + } + +} + +resource "tls_private_key" "cert" { + 
algorithm = "ECDSA" + ecdsa_curve = "P384" +} + +resource "tls_cert_request" "cert" { + key_algorithm = tls_private_key.cert.algorithm + private_key_pem = tls_private_key.cert.private_key_pem + + subject { + common_name = "kong_clustering" + } +} + +resource "tls_locally_signed_cert" "cert" { + cert_request_pem = tls_cert_request.cert.cert_request_pem + + ca_key_algorithm = tls_private_key.ca.algorithm + ca_private_key_pem = tls_private_key.ca.private_key_pem + ca_cert_pem = tls_self_signed_cert.ca.cert_pem + + validity_period_hours = "12" + allowed_uses = [ + ] + +} diff --git a/examples/hybrid/variables.tf b/examples/hybrid/variables.tf index 7c151a1..0e03b07 100644 --- a/examples/hybrid/variables.tf +++ b/examples/hybrid/variables.tf @@ -3,6 +3,12 @@ variable "region" { type = string } +variable "instance_type" { + description = "The instance type to use for the kong deployments" + type = string + default = "t3.small" +} + variable "key_name" { description = "The name of an AWS ssh key pari to associate with the instances in the ASG" type = string @@ -14,18 +20,6 @@ variable "kong_database_password" { type = string } -variable "kong_database_name" { - description = "The kong database name" - type = string - default = "kong" -} - -variable "kong_database_user" { - description = "The database use needed to access kong" - type = string - default = "kong" -} - variable "environment" { description = "Resource environment tag (i.e. 
dev, stage, prod)" type = string @@ -61,10 +55,22 @@ variable "vpc_cidr_block" { type = string } +variable "asg_max_size" { + description = "The maximum size of the auto scale group" + type = string + default = 1 +} + +variable "asg_min_size" { + description = "The minimum size of the auto scale group" + type = string + default = 1 +} + variable "asg_desired_capacity" { description = "The size of the autoscaling group" type = string - default = 2 + default = 1 } variable "postgres_master_user" { @@ -73,9 +79,24 @@ variable "postgres_master_user" { default = "root" } -variable "external_cidr_blocks" { default = ["0.0.0.0/0"] } +variable "kong_database_name" { + description = "The kong database name" + type = string + default = "kong" +} + +variable "kong_database_user" { + description = "The database use needed to access kong" + type = string + default = "kong" +} + +variable "external_cidr_blocks" { + default = ["0.0.0.0/0"] +} variable "tags" { + type = map(string) default = { "Dept" = "Testing", } diff --git a/examples/hybrid_external_database/lb.tf b/examples/hybrid_external_database/lb.tf index eb2cc96..337c458 100644 --- a/examples/hybrid_external_database/lb.tf +++ b/examples/hybrid_external_database/lb.tf @@ -59,7 +59,6 @@ resource "aws_lb_target_group" "external-proxy" { port = 8000 protocol = "HTTP" vpc_id = aws_vpc.vpc.id - health_check { healthy_threshold = 5 interval = 5 @@ -75,7 +74,6 @@ resource "aws_lb_target_group" "external-admin-api" { port = 8001 protocol = "HTTP" vpc_id = aws_vpc.vpc.id - health_check { healthy_threshold = 5 interval = 5 @@ -87,8 +85,13 @@ resource "aws_lb_target_group" "external-admin-api" { } locals { - target_groups = [ + target_group_cp = [ aws_lb_target_group.external-admin-api.arn, + aws_lb_target_group.internal-cluster.arn, + aws_lb_target_group.internal-telemetry.arn, + aws_lb_target_group.internal-admin-api.arn + ] + target_group_dp = [ aws_lb_target_group.external-proxy.arn ] } @@ -115,3 +118,91 @@ resource 
"aws_lb_listener" "admin" { } } +resource "aws_lb" "internal" { + + name = "kong-internal-lb" + internal = true + subnets = module.create_kong_dp.private_subnet_ids + load_balancer_type = "network" + idle_timeout = 60 + tags = var.tags +} + +resource "aws_lb_target_group" "internal-cluster" { + name = "internal-cluster-8005" + port = 8005 + protocol = "TCP" + vpc_id = aws_vpc.vpc.id + + health_check { + healthy_threshold = 5 + interval = 30 + port = 8005 + protocol = "TCP" + unhealthy_threshold = 5 + } +} + +resource "aws_lb_target_group" "internal-telemetry" { + name = "internal-telemetry-8006" + port = 8006 + protocol = "TCP" + vpc_id = aws_vpc.vpc.id + health_check { + healthy_threshold = 5 + interval = 30 + port = 8006 + protocol = "TCP" + unhealthy_threshold = 5 + } +} + +resource "aws_lb_target_group" "internal-admin-api" { + name = "internal-admin-api-8001" # FIX + port = 8001 + protocol = "TCP" + vpc_id = aws_vpc.vpc.id + health_check { + healthy_threshold = 5 + interval = 30 + port = 8001 + protocol = "TCP" + unhealthy_threshold = 5 + } +} + +resource "aws_lb_listener" "cluster" { + + load_balancer_arn = aws_lb.internal.arn + port = 8005 + protocol = "TCP" + + default_action { + target_group_arn = aws_lb_target_group.internal-cluster.arn + type = "forward" + } +} + +resource "aws_lb_listener" "telemetry" { + + load_balancer_arn = aws_lb.internal.arn + port = 8006 + protocol = "TCP" + + default_action { + target_group_arn = aws_lb_target_group.internal-telemetry.arn + type = "forward" + } +} + +resource "aws_lb_listener" "internal-admin" { + + load_balancer_arn = aws_lb.internal.arn + port = 8001 + protocol = "TCP" + + default_action { + target_group_arn = aws_lb_target_group.internal-admin-api.arn + type = "forward" + } +} diff --git a/examples/hybrid_external_database/main.tf b/examples/hybrid_external_database/main.tf index 2a52b66..effa797 100644 --- a/examples/hybrid_external_database/main.tf +++ b/examples/hybrid_external_database/main.tf @@ -64,10 
+64,10 @@ resource "aws_security_group" "allow_postgres" { } resource "aws_subnet" "public_subnets" { - count = length(module.create_kong_asg.private_subnet_azs) + count = length(module.create_kong_cp.private_subnet_azs) vpc_id = aws_vpc.vpc.id cidr_block = "10.0.${4 + count.index}.0/24" - availability_zone = module.create_kong_asg.private_subnet_azs[count.index] + availability_zone = module.create_kong_cp.private_subnet_azs[count.index] map_public_ip_on_launch = true } @@ -123,7 +123,7 @@ data "template_cloudinit_config" "cloud-init" { resource "aws_instance" "external_postgres" { ami = data.aws_ami.ubuntu.id - instance_type = "t3.medium" + instance_type = "t3.small" key_name = var.key_name subnet_id = aws_subnet.public_subnets.0.id vpc_security_group_ids = [aws_security_group.allow_postgres.id] @@ -131,18 +131,46 @@ resource "aws_instance" "external_postgres" { tags = var.tags } -module "create_kong_asg" { - source = "../../" +locals { + + kong_control_plane_config = { + "KONG_ROLE" = "control_plane" + "KONG_PROXY_LISTEN" = "off" + "KONG_ANONYMOUS_REPORTS" = "off" + "KONG_PORTAL" = "on" + "KONG_VITALS" = "on" + "KONG_AUDIT_LOG" = "on" + "KONG_LOG_LEVEL" = "debug" + } + + kong_data_plane_config = { + "KONG_ROLE" = "data_plane" + "KONG_DATABASE" = "off" + "KONG_LOG_LEVEL" = "debug" + "KONG_ANONYMOUS_REPORTS" = "off" + } + + kong_hybrid_conf = { + cluster_cert = tls_locally_signed_cert.cert.cert_pem + cluster_key = tls_private_key.cert.private_key_pem + endpoint = aws_lb.internal.dns_name + } +} + +module "create_kong_cp" { + source = "../../" + + instance_type = var.instance_type vpc_id = aws_vpc.vpc.id ami_id = data.aws_ami.ubuntu.id key_name = var.key_name region = var.region vpc_cidr_block = aws_vpc.vpc.cidr_block - environment = var.environment - service = var.service - description = var.description iam_instance_profile_name = aws_iam_instance_profile.kong.name - asg_desired_capacity = var.asg_desired_capacity + + asg_desired_capacity = var.asg_desired_capacity 
+ asg_max_size = var.asg_max_size + asg_min_size = var.asg_min_size postgres_config = { master_user = var.postgres_master_user @@ -157,10 +185,48 @@ module "create_kong_asg" { password = var.kong_database_password } - target_group_arns = local.target_groups + target_group_arns = local.target_group_cp + + skip_rds_creation = true + kong_config = local.kong_control_plane_config + kong_hybrid_conf = local.kong_hybrid_conf + + environment = var.environment + service = var.service + description = var.description + tags = var.tags +} + +module "create_kong_dp" { + source = "../../" + + instance_type = var.instance_type + vpc_id = aws_vpc.vpc.id + ami_id = data.aws_ami.ubuntu.id + key_name = var.key_name + region = var.region + vpc_cidr_block = aws_vpc.vpc.cidr_block + + iam_instance_profile_name = aws_iam_instance_profile.kong.name + + + asg_desired_capacity = var.asg_desired_capacity + asg_max_size = var.asg_max_size + asg_min_size = var.asg_min_size + + target_group_arns = local.target_group_dp skip_rds_creation = true - tags = var.tags + kong_config = local.kong_data_plane_config + kong_hybrid_conf = local.kong_hybrid_conf + + private_subnets = module.create_kong_cp.private_subnet_ids + availability_zones = module.create_kong_cp.private_subnet_azs + + environment = var.environment + service = var.service + description = var.description + tags = var.tags } resource "aws_route_table" "private" { @@ -174,7 +240,7 @@ resource "aws_route" "private_nat_gateway" { } resource "aws_route_table_association" "private" { - count = length(module.create_kong_asg.private_subnet_ids) - subnet_id = element(module.create_kong_asg.private_subnet_ids, count.index) + count = length(module.create_kong_cp.private_subnet_ids) + subnet_id = element(module.create_kong_cp.private_subnet_ids, count.index) route_table_id = aws_route_table.private.id } diff --git a/examples/hybrid_external_database/outputs.tf b/examples/hybrid_external_database/outputs.tf index 1b37bf8..386a540 100644 --- 
a/examples/hybrid_external_database/outputs.tf +++ b/examples/hybrid_external_database/outputs.tf @@ -1,6 +1,8 @@ locals { proxy = "http://${aws_lb.external.dns_name}:8000" admin_api = "http://${aws_lb.external.dns_name}:8001" + cluster = "http://${aws_lb.internal.dns_name}:8005" + telemetry = "http://${aws_lb.internal.dns_name}:8006" } output "kong-proxy-endpoint" { @@ -10,3 +12,11 @@ output "kong-proxy-endpoint" { output "kong-api-endpoint" { value = local.admin_api } + +output "kong-cluster-endpoint" { + value = local.cluster +} + +output "kong-telemetry-endpoint" { + value = local.telemetry +} diff --git a/examples/hybrid_external_database/tls_shared.tf b/examples/hybrid_external_database/tls_shared.tf new file mode 100644 index 0000000..f5c1820 --- /dev/null +++ b/examples/hybrid_external_database/tls_shared.tf @@ -0,0 +1,50 @@ +# generate certificates for Kong +resource "tls_private_key" "ca" { + algorithm = "ECDSA" + ecdsa_curve = "P384" +} + +resource "tls_self_signed_cert" "ca" { + key_algorithm = tls_private_key.ca.algorithm + private_key_pem = tls_private_key.ca.private_key_pem + is_ca_certificate = true + + validity_period_hours = "12" + allowed_uses = [ + "cert_signing", + "key_encipherment", + "digital_signature", + ] + + subject { + common_name = "kong_clustering" + } + +} + +resource "tls_private_key" "cert" { + algorithm = "ECDSA" + ecdsa_curve = "P384" +} + +resource "tls_cert_request" "cert" { + key_algorithm = tls_private_key.cert.algorithm + private_key_pem = tls_private_key.cert.private_key_pem + + subject { + common_name = "kong_clustering" + } +} + +resource "tls_locally_signed_cert" "cert" { + cert_request_pem = tls_cert_request.cert.cert_request_pem + + ca_key_algorithm = tls_private_key.ca.algorithm + ca_private_key_pem = tls_private_key.ca.private_key_pem + ca_cert_pem = tls_self_signed_cert.ca.cert_pem + + validity_period_hours = "12" + allowed_uses = [ + ] + +} diff --git a/examples/hybrid_external_database/variables.tf 
b/examples/hybrid_external_database/variables.tf index fb21ffb..6cec7d8 100644 --- a/examples/hybrid_external_database/variables.tf +++ b/examples/hybrid_external_database/variables.tf @@ -3,6 +3,12 @@ variable "region" { type = string } +variable "instance_type" { + description = "The instance type to use for the kong deployments" + type = string + default = "t3.small" +} + variable "key_name" { description = "The name of an AWS ssh key pari to associate with the instances in the ASG" type = string @@ -49,10 +55,22 @@ variable "vpc_cidr_block" { type = string } +variable "asg_max_size" { + description = "The maximum size of the auto scale group" + type = string + default = 1 +} + +variable "asg_min_size" { + description = "The minimum size of the auto scale group" + type = string + default = 1 +} + variable "asg_desired_capacity" { description = "The size of the autoscaling group" type = string - default = 2 + default = 1 } variable "postgres_master_user" { @@ -76,7 +94,8 @@ variable "kong_database_user" { variable "external_cidr_blocks" { default = ["0.0.0.0/0"] } variable "tags" { + type = map(string) default = { - "Dept" = "Testing", + "Dept" = "Testing" } } diff --git a/main.tf b/main.tf index f46fb78..e68a852 100644 --- a/main.tf +++ b/main.tf @@ -2,14 +2,21 @@ locals { create_private_subnets = length(var.private_subnets) > 0 ? 0 : 1 create_security_groups = length(var.security_group_ids) > 0 ? 0 : 1 + role = lookup(var.kong_config, "KONG_ROLE", "embedded") + tags = merge(var.tags, { + "service" = var.service, + "environment" = var.environment, + "role" = local.role + }) + + # If the module user has specified a postgres_host then we use + # that as our endpoint, as we will not be triggering the database module db_info = var.postgres_host != "" ?
{ endpoint = var.postgres_host database_name = var.kong_database_config.name } : { - endpoint = module.database.0.outputs.endpoint, - database_name = module.database.0.outputs.database_name + endpoint = local.role == "data_plane" ? "" : module.database.0.outputs.endpoint, + database_name = local.role == "data_plane" ? "" : module.database.0.outputs.database_name } security_groups = length(var.security_group_ids) > 0 ? var.security_group_ids : module.security_groups.0.ids @@ -27,21 +34,25 @@ locals { user_data = templatefile("${path.module}/templates/cloud-init.cfg", {}) user_data_script = templatefile("${path.module}/templates/cloud-init.sh", { - PROXY_CONFIG = var.proxy_config - DB_USER = var.kong_database_config.user - DB_HOST = local.db_info.endpoint - DB_NAME = local.db_info.database_name - CE_PKG = var.ce_pkg - EE_PKG = var.ee_pkg - PARAMETER_PATH = local.ssm_parameter_path - REGION = var.region - VPC_CIDR_BLOCK = var.vpc_cidr_block - DECK_VERSION = var.deck_version - MANAGER_HOST = var.manager_host - PORTAL_HOST = var.portal_host - SESSION_SECRET = random_string.session_secret.result - KONG_CONFIG = var.kong_config + proxy_config = var.proxy_config + db_user = var.kong_database_config.user + db_host = local.db_info.endpoint + db_name = local.db_info.database_name + ce_pkg = var.ce_pkg + ee_pkg = var.ee_pkg + parameter_path = local.ssm_parameter_path + region = var.region + vpc_cidr_block = var.vpc_cidr_block + deck_version = var.deck_version + manager_host = var.manager_host + portal_host = var.portal_host + session_secret = random_string.session_secret.result + kong_config = var.kong_config + kong_ports = var.kong_ports + kong_ssl_uris = var.kong_ssl_uris + kong_hybrid_conf = var.kong_hybrid_conf }) + name = format("%s-%s-%s", var.service, var.environment, random_string.prefix.result) } module "security_groups" { @@ -92,7 +103,7 @@ data "template_cloudinit_config" "cloud-init" { } resource "aws_launch_configuration" "kong" { - name_prefix = format("%s-%s-", 
var.service, var.environment) + name_prefix = local.name image_id = var.ami_id instance_type = var.instance_type iam_instance_profile = var.iam_instance_profile_name @@ -116,8 +127,10 @@ resource "aws_launch_configuration" "kong" { depends_on = [module.database] } + + resource "aws_autoscaling_group" "kong" { - name = format("%s-%s", var.service, var.environment) + name_prefix = local.name vpc_zone_identifier = local.private_subnets launch_configuration = aws_launch_configuration.kong.name @@ -129,30 +142,9 @@ resource "aws_autoscaling_group" "kong" { max_size = var.asg_max_size min_size = var.asg_min_size target_group_arns = var.target_group_arns - tag { - key = "Name" - value = format("%s-%s", var.service, var.environment) - propagate_at_launch = true - } - tag { - key = "Environment" - value = var.environment - propagate_at_launch = true - } - tag { - key = "Description" - value = var.description - propagate_at_launch = true - } - tag { - key = "Service" - value = var.service - propagate_at_launch = true - } dynamic "tag" { - for_each = var.additional_tags - + for_each = local.tags content { key = tag.key value = tag.value @@ -161,6 +153,11 @@ resource "aws_autoscaling_group" "kong" { } } +resource "random_string" "prefix" { + length = 6 + special = false +} + resource "random_string" "session_secret" { length = 32 special = false diff --git a/modules/security_groups/variables.tf b/modules/security_groups/variables.tf index 8e7c702..e594b4f 100644 --- a/modules/security_groups/variables.tf +++ b/modules/security_groups/variables.tf @@ -18,67 +18,9 @@ variable "rules_with_source_cidr_blocks" { protocol = string, cidr_blocks = list(string) })) - default = { - "kong-ingress-proxy-http" = { - type = "ingress", - from_port = 8000, - to_port = 8000, - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - }, - "kong-ingress-api-http" = { - type = "ingress", - from_port = 8001, - to_port = 8001, - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - }, - 
"kong-ingress-manager-http" = { - type = "ingress", - from_port = 8002, - to_port = 8002, - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - }, - "kong-ingress-portal-gui-http" = { - type = "ingress", - from_port = 8003, - to_port = 8003, - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - }, - "kong-ingress-portal-http" = { - type = "ingress", - from_port = 8004, - to_port = 8004, - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - }, - "kong-egress-80" = { - type = "egress", - from_port = 80, - to_port = 80, - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - }, - "kong-egress-443" = { - type = "egress", - from_port = 443, - to_port = 443, - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - }, - "kong-egress-postgresq" = { - type = "egress", - from_port = 5432, - to_port = 5432, - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - } + default = {} } - variable "rules_with_source_security_groups" { description = "Security rules for the Kong instance that have another security group for their source" type = map(object({ diff --git a/templates/cloud-init.sh b/templates/cloud-init.sh index 4c87ffd..8e2a66e 100644 --- a/templates/cloud-init.sh +++ b/templates/cloud-init.sh @@ -3,7 +3,7 @@ set -x exec &> /tmp/cloud-init.log -%{ for config_key, config_value in PROXY_CONFIG ~} +%{ for config_key, config_value in proxy_config ~} %{ if config_value != null ~} export ${config_key}="${config_value}" %{ endif ~} @@ -23,8 +23,8 @@ done # Function to grab SSM parameters aws_get_parameter() { - aws ssm --region ${REGION} get-parameter \ - --name "${PARAMETER_PATH}/$1" \ + aws ssm --region ${region} get-parameter \ + --name "${parameter_path}/$1" \ --with-decryption \ --output text \ --query Parameter.Value 2>/dev/null @@ -36,7 +36,7 @@ apt-get upgrade -y apt-get install -y apt-listchanges unattended-upgrades \ ntp runit runit-systemd dnsutils curl telnet pwgen \ - postgresql-client perl libpcre3 awscli + postgresql-client perl libpcre3 awscli jq # Enable auto updates echo 
"Enabling auto updates" @@ -46,27 +46,46 @@ dpkg-reconfigure -f noninteractive unattended-upgrades # Installing decK # https://github.com/hbagdi/deck -curl -sL https://github.com/hbagdi/deck/releases/download/v${DECK_VERSION}/deck_${DECK_VERSION}_linux_amd64.tar.gz \ +curl -sL https://github.com/hbagdi/deck/releases/download/v${deck_version}/deck_${deck_version}_linux_amd64.tar.gz \ -o deck.tar.gz tar zxf deck.tar.gz deck sudo mv deck /usr/local/bin sudo chown root:kong /usr/local/bin/deck sudo chmod 755 /usr/local/bin/deck +# These certificates are used for +# clustering Kong control plane +# and data plane when used in hybrid +# mode +%{ if lookup(kong_config, "KONG_ROLE", null) != null ~} +mkdir -p /etc/kong_clustering +%{ if kong_hybrid_conf.cluster_cert != "" ~} +cat << EOF >/etc/kong_clustering/cluster.crt +${kong_hybrid_conf.cluster_cert} +EOF +%{ endif ~} + +%{ if kong_hybrid_conf.cluster_key != "" ~} +cat << EOF >/etc/kong_clustering/cluster.key +${kong_hybrid_conf.cluster_key} +EOF +%{ endif ~} +%{ endif ~} # Install Kong echo "Installing Kong" EE_LICENSE=$(aws_get_parameter ee/license) EE_CREDS=$(aws_get_parameter ee/bintray-auth) if [ "$EE_LICENSE" != "placeholder" ]; then - curl -sL https://kong.bintray.com/kong-enterprise-edition-deb/dists/${EE_PKG} \ + curl -sL https://kong.bintray.com/kong-enterprise-edition-deb/dists/${ee_pkg} \ -u $EE_CREDS \ - -o ${EE_PKG} + -o ${ee_pkg} - if [ ! -f ${EE_PKG} ]; then + if [ ! -f ${ee_pkg} ]; then echo "Error: Enterprise edition download failed, aborting." 
exit 1 fi - dpkg -i ${EE_PKG} + dpkg -i ${ee_pkg} + apt-get -f install -y cat < /etc/kong/license.json $EE_LICENSE @@ -74,25 +93,27 @@ EOF chown root:kong /etc/kong/license.json chmod 640 /etc/kong/license.json else - curl -sL "https://bintray.com/kong/kong-deb/download_file?file_path=${CE_PKG}" \ - -o ${CE_PKG} - dpkg -i ${CE_PKG} + curl -sL "https://bintray.com/kong/kong-deb/download_file?file_path=${ce_pkg}" \ + -o ${ce_pkg} + dpkg -i ${ce_pkg} + apt-get -f install -y fi +%{ if lookup(kong_config, "KONG_ROLE", "embedded") != "data_plane" ~} # Setup database echo "Setting up Kong database" PGPASSWORD=$(aws_get_parameter "db/password/master") DB_PASSWORD=$(aws_get_parameter "db/password") -DB_HOST=${DB_HOST} -DB_NAME=${DB_NAME} +DB_HOST=${db_host} +DB_NAME=${db_name} export PGPASSWORD RESULT=$(psql --host $DB_HOST --username root \ --tuples-only --no-align postgres \ < /etc/systemd/system/kong-gw.service +[Unit] +Description=KongGW +Documentation=https://docs.konghq.com/ +After=syslog.target network.target remote-fs.target nss-lookup.target + +[Service] +ExecStartPre=/usr/local/bin/kong prepare -p /usr/local/kong +ExecStart=/usr/local/openresty/nginx/sbin/nginx -p /usr/local/kong -c nginx.conf +ExecReload=/usr/local/bin/kong prepare -p /usr/local/kong +ExecReload=/usr/local/openresty/nginx/sbin/nginx -p /usr/local/kong -c nginx.conf -s reload +ExecStop=/bin/kill -s QUIT $MAINPID +PrivateTmp=true + +Environment=KONG_NGINX_DAEMON=off +Environment=KONG_PROXY_ACCESS_LOG=syslog:server=unix:/dev/log +Environment=KONG_PROXY_ERROR_LOG=syslog:server=unix:/dev/log +Environment=KONG_ADMIN_ACCESS_LOG=syslog:server=unix:/dev/log +Environment=KONG_ADMIN_ERROR_LOG=syslog:server=unix:/dev/log +EnvironmentFile=/etc/kong/kong_env.conf + +LimitNOFILE=infinity + +[Install] +WantedBy=multi-user.target +EOF # Setup Configuration file -cat < /etc/kong/kong.conf -# kong.conf, Kong configuration file -# Written by Dennis Kelly -# Updated by Dennis Kelly -# -# 2020-01-23: Support for EE 
Kong Manager Auth -# 2019-09-30: Support for 1.x releases and Dev Portal -# 2018-03-13: Support for 0.12 and load balancing -# 2017-06-20: Initial release -# -# Notes: -# - See kong.conf.default for further information - -# Database settings -database = postgres -pg_host = $DB_HOST -pg_user = ${DB_USER} -pg_password = $DB_PASSWORD -pg_database = $DB_NAME +cat < /etc/kong/kong_env.conf +%{if lookup(kong_config, "KONG_ROLE", "embedded") == "embedded" || lookup(kong_config, "KONG_ROLE", "embedded") == "control_plane" ~} +KONG_DATABASE="postgres" +KONG_PG_HOST="$DB_HOST" +KONG_PG_USER="${db_user}" +KONG_PG_PASSWORD="$DB_PASSWORD" +KONG_PG_DATABASE="$DB_NAME" +%{ endif } # Load balancer headers -real_ip_header = X-Forwarded-For -trusted_ips = 0.0.0.0/0 - -# SSL terminiation is performed by load balancers -proxy_listen = 0.0.0.0:8000 -# For /status to load balancers -admin_listen = 0.0.0.0:8001 +KONG_REAL_IP_HEADER="X-Forwarded-For" +KONG_TRUSTED_IPS="0.0.0.0/0" + +%{if lookup(kong_config, "KONG_ROLE", null) != null ~} +%{if kong_config["KONG_ROLE"] == "data_plane" ~} +KONG_PROXY_LISTEN="0.0.0.0:${kong_ports.proxy}" +%{ else ~} +KONG_ADMIN_LISTEN="0.0.0.0:${kong_ports.admin_api}" +%{ endif ~} +%{ else ~} +KONG_PROXY_LISTEN="0.0.0.0:${kong_ports.proxy}" +KONG_ADMIN_LISTEN="0.0.0.0:${kong_ports.admin_api}" +%{ endif ~} EOF -chmod 640 /etc/kong/kong.conf -chgrp kong /etc/kong/kong.conf +chmod 640 /etc/kong/kong_env.conf +chgrp kong /etc/kong/kong_env.conf if [ "$EE_LICENSE" != "placeholder" ]; then - cat <> /etc/kong/kong.conf - -# Enterprise Edition Settings -# SSL terminiation is performed by load balancers -admin_gui_listen = 0.0.0.0:8002 -portal_gui_listen = 0.0.0.0:8003 -portal_api_listen = 0.0.0.0:8004 - -admin_api_uri = https://${MANAGER_HOST}:8444 -admin_gui_url = https://${MANAGER_HOST}:8445 - -portal = on -portal_gui_protocol = https -portal_gui_host = ${PORTAL_HOST}:8446 -portal_api_url = http://${PORTAL_HOST}:8447 -portal_cors_origins = 
https://${PORTAL_HOST}:8446, https://${PORTAL_HOST}:8447 - -vitals = on + cat <> /etc/kong/kong_env.conf +KONG_ADMIN_GUI_LISTEN="0.0.0.0:${kong_ports.admin_gui}" +KONG_PORTAL_GUI_LISTEN="0.0.0.0:${kong_ports.portal_gui}" +KONG_PORTAL_API_LISTEN="0.0.0.0:${kong_ports.portal_api}" + +KONG_ADMIN_API_URI="${kong_ssl_uris.admin_api_uri}" +KONG_ADMIN_GUI_URL="${kong_ssl_uris.admin_gui_url}" + +KONG_PORTAL_GUI_PROTOCOL="https" +KONG_PORTAL_GUI_HOST="${kong_ssl_uris.portal_gui_host}" +KONG_PORTAL_API_URL="${kong_ssl_uris.portal_api_url}" +KONG_PORTAL_CORS_ORIGINS="https://${kong_ssl_uris.portal_gui_host}, https://${kong_ssl_uris.portal_api_url}" EOF for DIR in gui lib portal; do @@ -176,82 +213,38 @@ fi chown root:kong /usr/local/kong chmod 2775 /usr/local/kong +%{if lookup(kong_config, "KONG_ROLE", "embedded") == "embedded" || lookup(kong_config, "KONG_ROLE", "embedded") == "control_plane" ~} # Initialize Kong echo "Initializing Kong" + +export KONG_DATABASE="postgres" +export KONG_PG_HOST="$DB_HOST" +export KONG_PG_DATABASE="$DB_NAME" +export KONG_PG_USER="${db_user}" +export KONG_PG_PASSWORD="$DB_PASSWORD" +export KONG_PG_DATABASE="$DB_NAME" + if [ "$EE_LICENSE" != "placeholder" ]; then ADMIN_TOKEN=$(aws_get_parameter "ee/admin/token") - sudo -u kong KONG_PASSWORD=$ADMIN_TOKEN kong migrations bootstrap + KONG_PASSWORD=$ADMIN_TOKEN kong migrations bootstrap else - sudo -u kong kong migrations bootstrap + kong migrations bootstrap fi -cat <<'EOF' > /usr/local/kong/nginx.conf -worker_processes auto; -daemon off; - -pid pids/nginx.pid; -error_log logs/error.log notice; - -worker_rlimit_nofile 65536; - -events { - worker_connections 8192; - multi_accept on; -} - -http { - include nginx-kong.conf; -} -EOF -chown root:kong /usr/local/kong/nginx.conf - -# Log rotation -cat <<'EOF' > /etc/logrotate.d/kong -/usr/local/kong/logs/*.log { - rotate 14 - daily - compress - missingok - notifempty - create 640 kong kong - sharedscripts - - postrotate - /usr/bin/sv 1 /etc/sv/kong - 
endscript -} -EOF - -# Start Kong under supervision -echo "Starting Kong under supervision" -mkdir -p /etc/sv/kong /etc/sv/kong/log - -cat <<'EOF' > /etc/sv/kong/run -#!/bin/sh -e -exec 2>&1 - -ulimit -n 65536 -sudo -u kong kong prepare -exec chpst -u kong /usr/local/openresty/nginx/sbin/nginx -p /usr/local/kong -c nginx.conf -EOF - -cat <<'EOF' > /etc/sv/kong/log/run -#!/bin/sh -e - -[ -d /var/log/kong ] || mkdir -p /var/log/kong -chown kong:kong /var/log/kong - -exec chpst -u kong /usr/bin/svlogd -tt /var/log/kong -EOF -chmod 744 /etc/sv/kong/run /etc/sv/kong/log/run - -cd /etc/service -ln -s /etc/sv/kong +unset KONG_DATABASE +unset KONG_PG_HOST +unset KONG_PG_DATABASE +unset KONG_PG_USER +unset KONG_PG_PASSWORD +unset KONG_PG_DATABASE +%{ endif ~} +systemctl enable --now kong-gw +%{if lookup(kong_config, "KONG_ROLE", "embedded") == "embedded" || lookup(kong_config, "KONG_ROLE", "embedded") == "control_plane" ~} # Verify Admin API is up RUNNING=0 for I in 1 2 3 4 5 6 7 8 9; do - curl -s -I http://localhost:8001/status | grep -q "200 OK" + curl -s -I http://localhost:${kong_ports.admin_api}/status | grep -q "200 OK" if [ $? = 0 ]; then RUNNING=1 break @@ -265,57 +258,77 @@ if [ $RUNNING = 0 ]; then fi # Enable healthchecks using a kong endpoint -curl -s -I http://localhost:8000/status | grep -q "200 OK" +curl -s localhost:${kong_ports.admin_api}/services | \ + jq -e -r '.data[] | select(.name | contains("status")) | if .id !="" then .name else false end' if [ $? 
!= 0 ]; then echo "Configuring healthcheck" - curl -s -X POST http://localhost:8001/services \ + curl -s -X POST http://localhost:${kong_ports.admin_api}/services \ -d name=status \ - -d host=localhost \ - -d port=8001 \ - -d path=/status > /dev/null - curl -s -X POST http://localhost:8001/services/status/routes \ + -d url=http://httpbin.org/get > /dev/null + curl -s -X POST http://localhost:${kong_ports.admin_api}/services/status/routes \ -d name=status \ -d 'methods[]=HEAD' \ -d 'methods[]=GET' \ -d 'paths[]=/status' > /dev/null - curl -s -X POST http://localhost:8001/services/status/plugins \ + curl -s -X POST http://localhost:${kong_ports.admin_api}/services/status/plugins \ -d name=ip-restriction \ -d "config.whitelist=127.0.0.1" \ - -d "config.whitelist=${VPC_CIDR_BLOCK}" > /dev/null + -d "config.whitelist=${vpc_cidr_block}" > /dev/null fi if [ "$EE_LICENSE" != "placeholder" ]; then echo "Configuring enterprise edition settings" # Monitor role, endpoints, user, for healthcheck - curl -s -X GET -I http://localhost:8001/rbac/roles/monitor | grep -q "200 OK" + curl -s -X GET -I http://localhost:${kong_ports.admin_api}/rbac/roles/monitor | grep -q "200 OK" if [ $? 
!= 0 ]; then COMMENT="Load balancer access to /status" - curl -s -X POST http://localhost:8001/rbac/roles \ + curl -s -X POST http://localhost:${kong_ports.admin_api}/rbac/roles \ -d name=monitor \ -d comment="$COMMENT" > /dev/null - curl -s -X POST http://localhost:8001/rbac/roles/monitor/endpoints \ + curl -s -X POST http://localhost:${kong_ports.admin_api}/rbac/roles/monitor/endpoints \ -d endpoint=/status -d actions=read \ -d comment="$COMMENT" > /dev/null - curl -s -X POST http://localhost:8001/rbac/users \ + curl -s -X POST http://localhost:${kong_ports.admin_api}/rbac/users \ -d name=monitor -d user_token=monitor \ -d comment="$COMMENT" > /dev/null - curl -s -X POST http://localhost:8001/rbac/users/monitor/roles \ + curl -s -X POST http://localhost:${kong_ports.admin_api}/rbac/users/monitor/roles \ -d roles=monitor > /dev/null # Add authentication token for /status - curl -s -X POST http://localhost:8001/services/status/plugins \ + curl -s -X POST http://localhost:${kong_ports.admin_api}/services/status/plugins \ -d name=request-transformer \ -d 'config.add.headers[]=Kong-Admin-Token:monitor' > /dev/null fi - sv stop /etc/sv/kong - cat <> /etc/kong/kong.conf -enforce_rbac = on -admin_gui_auth = basic-auth -admin_gui_session_conf = { "secret":"${SESSION_SECRET}", "cookie_secure":false } + cat <> /etc/kong/kong_env.conf +%{ if lookup(kong_config, "KONG_ADMIN_GUI_SESSION_CONF", null) == null } +KONG_ADMIN_GUI_SESSION_CONF="{\"secret\":\"${session_secret}\",\"cookie_secure\":false}" +%{ endif } EOF - - sv start /etc/sv/kong fi +%{ endif } + +cat <> /etc/kong/kong_env.conf +%{ if lookup(kong_config, "KONG_ROLE", null) == "control_plane" ~} +KONG_CLUSTER_MTLS="shared" +KONG_CLUSTER_CERT="/etc/kong_clustering/cluster.crt" +KONG_CLUSTER_CERT_KEY="/etc/kong_clustering/cluster.key" +%{ endif ~} + +%{ if lookup(kong_config, "KONG_ROLE", null) == "data_plane" ~} +KONG_CLUSTER_MTLS="shared" +KONG_CLUSTER_CERT="/etc/kong_clustering/cluster.crt" 
+KONG_CLUSTER_CERT_KEY="/etc/kong_clustering/cluster.key" +KONG_LUA_SSL_TRUSTED_CERTIFICATE="/etc/kong_clustering/cluster.crt" +KONG_CLUSTER_CONTROL_PLANE="${kong_hybrid_conf.endpoint}:${kong_ports.cluster}" +KONG_CLUSTER_TELEMETRY_ENDPOINT="${kong_hybrid_conf.endpoint}:${kong_ports.telemetry}" +%{ endif ~} + +%{ for key, value in kong_config ~} +${key}="${value}" +%{ endfor ~} +EOF + +systemctl restart kong-gw diff --git a/variables.tf b/variables.tf index c90a240..ab6c2c3 100644 --- a/variables.tf +++ b/variables.tf @@ -48,12 +48,6 @@ variable "placement_tenancy" { default = "default" } -variable "kong_config" { - description = "A map of key value pairs that describe the Kong GW config, used when constructing the userdata script" - type = map(string) - default = {} -} - variable "root_block_size" { description = "The size of the root block device to attach to each instance" type = number @@ -166,13 +160,13 @@ variable "kong_database_config" { variable "ce_pkg" { description = "Filename of the Community Edition package" type = string - default = "kong-1.5.0.bionic.amd64.deb" # todo: update + default = "kong-2.3.2.focal.amd64.deb" } variable "ee_pkg" { description = "Filename of the Enterprise Edition package" type = string - default = "kong-enterprise-edition-1.3.0.1.bionic.all.deb" # todo: update + default = "kong-enterprise-edition-2.3.2.0.focal.all.deb" } variable "region" { @@ -249,7 +243,7 @@ variable "asg_desired_capacity" { variable "asg_health_check_grace_period" { description = "Time in seconds after instance comes into service before checking health" type = string - default = 300 + default = 600 } variable "rules_with_source_cidr_blocks" { @@ -304,6 +298,20 @@ variable "rules_with_source_cidr_blocks" { protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] }, + "kong-ingress-8005" = { + type = "ingress", + from_port = 8005, + to_port = 8005, + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + }, + "kong-ingress-8006" = { + type = "ingress", + from_port = 8006, + 
to_port = 8006, + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + }, "kong-egress-80" = { type = "egress", from_port = 80, @@ -332,6 +340,20 @@ variable "rules_with_source_cidr_blocks" { protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] }, + "kong-egress-8005" = { + type = "egress", + from_port = 8005, + to_port = 8005, + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + }, + "kong-egress-8006" = { + type = "egress", + from_port = 8006, + to_port = 8006, + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + }, "kong-egress-postgresq" = { type = "egress", from_port = 5432, @@ -390,6 +412,7 @@ variable "target_group_arns" { variable "tags" { description = "Tags to apply to aws resources" type = map(string) + default = {} } variable "proxy_config" { @@ -405,3 +428,60 @@ variable "proxy_config" { no_proxy = null } } + +variable "kong_ports" { + description = "An object defining the kong http ports" + type = object({ + proxy = number + admin_api = number + admin_gui = number + portal_gui = number + portal_api = number + cluster = number + telemetry = number + }) + default = { + proxy = 8000 + admin_api = 8001 + admin_gui = 8002 + portal_gui = 8003 + portal_api = 8004 + cluster = 8005 + telemetry = 8006 + } +} + +variable "kong_hybrid_conf" { + description = "An object defining the kong hybrid mode settings: cluster certificate, key and control plane endpoint" + type = object({ + cluster_cert = string + cluster_key = string + endpoint = string + }) + default = { + cluster_cert = "" + cluster_key = "" + endpoint = "" + } +} +variable "kong_ssl_uris" { + description = "Object containing the ssl uris for kong, e.g. 
load balancer dns names and ports" + type = object({ + admin_api_uri = string + admin_gui_url = string + portal_gui_host = string + portal_api_url = string + }) + default = { + admin_api_uri = "https://localhost:8444" + admin_gui_url = "https://localhost:8445" + portal_gui_host = "https://localhost:8446" + portal_api_url = "https://localhost:8447" + } +} + +variable "kong_config" { + description = "A map of key value pairs that describe the Kong GW config, used when constructing the userdata script" + type = map(string) + default = {} +}