diff --git a/.github/actions/aurora-manage-cluster/README.md b/.github/actions/aurora-manage-cluster/README.md index a6f98677..d3e20023 100644 --- a/.github/actions/aurora-manage-cluster/README.md +++ b/.github/actions/aurora-manage-cluster/README.md @@ -10,6 +10,7 @@ This action will also install Terraform and awscli. It will output the Aurora cl | name | description | required | default | | --- | --- | --- | --- | +| `aws-region` |
AWS region where the cluster will be deployed | `true` | `""` |
 | `cluster-name` | Name of the RDS Aurora cluster to deploy | `true` | `""` |
 | `username` | Username for the PostgreSQL admin user | `true` | `""` |
 | `password` | Password for the PostgreSQL admin user
| `true` | `""` | @@ -47,6 +48,12 @@ This action is a `composite` action. ```yaml - uses: camunda/camunda-tf-eks-module/.github/actions/aurora-manage-cluster@main with: + aws-region: + # AWS region where the cluster will be deployed + # + # Required: true + # Default: "" + cluster-name: # Name of the RDS Aurora cluster to deploy # diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 82313cb0..f70af9a8 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -6,6 +6,10 @@ description: | This action will also install Terraform and awscli. It will output the Aurora cluster endpoint. inputs: + aws-region: + description: AWS region where the cluster will be deployed + required: true + cluster-name: description: Name of the RDS Aurora cluster to deploy required: true @@ -110,6 +114,8 @@ runs: awscli-version: ${{ inputs.awscli-version }} terraform-version: ${{ inputs.terraform-version }} + aws-region: ${{ inputs.aws-region }} + s3-backend-bucket: ${{ inputs.s3-backend-bucket }} s3-bucket-region: ${{ inputs.s3-bucket-region }} @@ -132,6 +138,8 @@ runs: id: init working-directory: ${{ inputs.tf-modules-path }}/modules/aurora/ run: | + set -euxo pipefail + cp ../fixtures/backend.tf ./ terraform version terraform init -backend-config="bucket=${{ steps.utility.outputs.TFSTATE_BUCKET }}" -backend-config="key=${{ steps.utility.outputs.TFSTATE_KEY }}" \ @@ -143,6 +151,8 @@ runs: id: plan working-directory: ${{ inputs.tf-modules-path }}/modules/aurora/ run: | + set -euxo pipefail + echo '${{ inputs.additional-terraform-vars }}' > /tmp/var.tfvars.json terraform plan -no-color -out aurora.plan \ -var-file=/tmp/var.tfvars.json \ @@ -159,6 +169,8 @@ runs: id: apply working-directory: ${{ inputs.tf-modules-path }}/modules/aurora/ run: | + set -euxo pipefail + terraform apply -no-color aurora.plan export aurora_endpoint="$(terraform output -raw aurora_endpoint)" echo "aurora_endpoint=$aurora_endpoint" >> "$GITHUB_OUTPUT" @@ -168,5 +180,7 @@ runs: id: fetch_outputs working-directory: ${{ inputs.tf-modules-path }}/modules/aurora/ run: | + set -euxo pipefail + all_outputs=$(terraform output -json | jq -c .) echo "all_terraform_outputs=$all_outputs" | tee -a "$GITHUB_OUTPUT" diff --git a/.github/actions/eks-cleanup-resources/README.md b/.github/actions/eks-cleanup-resources/README.md index e41da702..722b5b4e 100644 --- a/.github/actions/eks-cleanup-resources/README.md +++ b/.github/actions/eks-cleanup-resources/README.md @@ -14,6 +14,7 @@ This GitHub Action automates the deletion of EKS resources using a shell script. | `max-age-hours` |
Maximum age of resources in hours | `false` | `20` |
 | `target` | Specify an ID to destroy specific resources or "all" to destroy all resources | `false` | `all` |
 | `temp-dir` | Temporary directory prefix used for storing resource data during processing | `false` | `./tmp/eks-cleanup/` |
+| `module-name` | Name of the module to destroy (e.g., "eks-cluster", "aurora", "opensearch"), or "all" to destroy all modules
| `false` | `all` | ## Runs @@ -54,4 +55,10 @@ This action is a `composite` action. # # Required: false # Default: ./tmp/eks-cleanup/ + + module-name: + # Name of the module to destroy (e.g., "eks-cluster", "aurora", "opensearch"), or "all" to destroy all modules + # + # Required: false + # Default: all ``` diff --git a/.github/actions/eks-cleanup-resources/action.yml b/.github/actions/eks-cleanup-resources/action.yml index 21525c07..70c75d0e 100644 --- a/.github/actions/eks-cleanup-resources/action.yml +++ b/.github/actions/eks-cleanup-resources/action.yml @@ -4,7 +4,6 @@ name: Delete EKS resources description: | This GitHub Action automates the deletion of EKS resources using a shell script. - inputs: tf-bucket: description: Bucket containing the resources states @@ -26,6 +25,10 @@ inputs: description: Temporary directory prefix used for storing resource data during processing default: ./tmp/eks-cleanup/ + module-name: + description: Name of the module to destroy (e.g., "eks-cluster", "aurora", "opensearch"), or "all" to destroy all modules + default: all + runs: using: composite steps: @@ -33,9 +36,11 @@ runs: id: delete_resources shell: bash run: | + set -euxo pipefail + if [ -n "${{ inputs.tf-bucket-region }}" ]; then export AWS_S3_REGION="${{ inputs.tf-bucket-region }}" fi ${{ github.action_path }}/scripts/destroy.sh "${{ inputs.tf-bucket }}" ${{ github.action_path }}/../../../modules/ \ - "${{ inputs.temp-dir }}" ${{ inputs.max-age-hours }} ${{ inputs.target }} + "${{ inputs.temp-dir }}" ${{ inputs.max-age-hours }} ${{ inputs.target }} ${{ inputs.module-name }} diff --git a/.github/actions/eks-cleanup-resources/scripts/destroy.sh b/.github/actions/eks-cleanup-resources/scripts/destroy.sh index d12e0d59..879fa5dc 100755 --- a/.github/actions/eks-cleanup-resources/scripts/destroy.sh +++ b/.github/actions/eks-cleanup-resources/scripts/destroy.sh @@ -9,7 +9,7 @@ set -o pipefail # is successful, it removes the corresponding S3 objects. # # Usage: -# ./destroy.sh +# ./destroy.sh [MODULE_NAME] # # Arguments: # BUCKET: The name of the S3 bucket containing the resource state files. @@ -17,18 +17,19 @@ set -o pipefail # TEMP_DIR_PREFIX: The prefix for the temporary directories created for each resource. # MIN_AGE_IN_HOURS: The minimum age (in hours) of resources to be destroyed. # ID_OR_ALL: The specific ID suffix to filter objects, or "all" to destroy all objects. +# MODULE_NAME (optional): The name of the module to destroy (e.g., "eks-cluster", "aurora", "opensearch"). Default is "all". # # Example: # ./destroy.sh tf-state-eks-ci-eu-west-3 ./modules/eks/ /tmp/eks/ 24 all -# ./destroy.sh tf-state-eks-ci-eu-west-3 ./modules/eks/ /tmp/eks/ 24 4891048 +# ./destroy.sh tf-state-eks-ci-eu-west-3 ./modules/eks/ /tmp/eks/ 24 4891048 eks-cluster # # Requirements: # - AWS CLI installed and configured with the necessary permissions to access and modify the S3 bucket. # - Terraform installed and accessible in the PATH. 
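# Note on ordering: when several module types are present, resources are
# destroyed in dependency order - Aurora first, then OpenSearch, then the EKS
# clusters - so that resources deployed into a cluster's VPC are removed before
# the cluster itself. For example, this (hypothetical) invocation removes only
# OpenSearch domains older than 24 hours, whatever their ID suffix:
#   ./destroy.sh tf-state-eks-ci-eu-west-3 ./modules/eks/ /tmp/eks/ 24 all opensearch
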
# Check for required arguments
-if [ "$#" -ne 5 ]; then
-    echo "Usage: $0 <BUCKET> <MODULES_DIR> <TEMP_DIR_PREFIX> <MIN_AGE_IN_HOURS> <ID_OR_ALL>"
+if [ "$#" -lt 5 ] || [ "$#" -gt 6 ]; then
+    echo "Usage: $0 <BUCKET> <MODULES_DIR> <TEMP_DIR_PREFIX> <MIN_AGE_IN_HOURS> <ID_OR_ALL> [MODULE_NAME]"
     exit 1
 fi
@@ -50,6 +51,7 @@ MODULES_DIR=$2
 TEMP_DIR_PREFIX=$3
 MIN_AGE_IN_HOURS=$4
 ID_OR_ALL=$5
+MODULE_NAME=${6:-all}
 FAILED=0
 CURRENT_DIR=$(pwd)
 AWS_S3_REGION=${AWS_S3_REGION:-$AWS_REGION}
@@ -134,9 +136,7 @@ destroy_resource() {

   # Execute the terraform destroy command with appropriate variables (see https://github.com/hashicorp/terraform/issues/23552)
   if [ "$terraform_module" == "eks-cluster" ]; then
-    if terraform state list | grep -q "kubernetes_storage_class_v1.ebs_sc"; then
-      terraform state rm "kubernetes_storage_class_v1.ebs_sc"
-    fi
+    terraform state rm "kubernetes_storage_class_v1.ebs_sc" || true

     if ! terraform destroy -auto-approve \
       -var="region=$AWS_REGION" \
@@ -152,6 +152,15 @@ destroy_resource() {
       -var="subnet_ids=[]" \
       -var="cidr_blocks=[]" \
       -var="vpc_id=vpc-dummy"; then return 1; fi
+
+  elif [ "$terraform_module" == "opensearch" ]; then
+    if ! terraform destroy -auto-approve \
+      -var="domain_name=$cluster_name" \
+      -var="vpc_id=vpc-dummy" \
+      -var="advanced_security_master_user_password=dummy" \
+      -var="cidr_blocks=[]" \
+      -var="subnet_ids=[]"; then return 1; fi
+
   else
     echo "Unsupported module: $terraform_module"
     return 1
@@ -175,60 +185,106 @@ if [ $aws_exit_code -ne 0 ]; then
   exit 1
 fi

-
+# Categorize resources by module type
 if [ "$ID_OR_ALL" == "all" ]; then
   resources=$(echo "$all_objects" | grep "/terraform.tfstate" | awk '{print $4}')
 else
   resources=$(echo "$all_objects" | grep "/terraform.tfstate" | grep "$ID_OR_ALL" | awk '{print $4}')
 fi

+# Check if resources is empty (i.e., no objects found)
 if [ -z "$resources" ]; then
   echo "No terraform.tfstate objects found in the S3 bucket. Exiting script."
>&2 exit 0 fi -current_timestamp=$($date_command +%s) +# Initialise arrays for the resources by module type +aurora_resources=() +opensearch_resources=() +eks_resources=() +# Classify resources into different module types for resource_id in $resources; do - cd "$CURRENT_DIR" || return 1 - terraform_module=$(basename "$(dirname "$resource_id")") - echo "Checking resource $resource_id (terraform module=$terraform_module)" - last_modified=$(aws s3api head-object --bucket "$BUCKET" --key "$resource_id" --output json | grep LastModified | awk -F '"' '{print $4}') - if [ -z "$last_modified" ]; then - echo "Error: Failed to retrieve last modified timestamp for resource $resource_id" - exit 1 - fi + case "$terraform_module" in + aurora) + aurora_resources+=("$resource_id") + ;; + opensearch) + opensearch_resources+=("$resource_id") + ;; + eks-cluster) + eks_resources+=("$resource_id") + ;; + *) + echo "Skipping unsupported module: $terraform_module" + ;; + esac +done - last_modified_timestamp=$($date_command -d "$last_modified" +%s) - if [ -z "$last_modified_timestamp" ]; then - echo "Error: Failed to convert last modified timestamp to seconds since epoch for resource $resource_id" - exit 1 - fi - echo "resource $resource_id last modification: $last_modified ($last_modified_timestamp)" +current_timestamp=$($date_command +%s) - file_age_hours=$(( ($current_timestamp - $last_modified_timestamp) / 3600 )) - if [ -z "$file_age_hours" ]; then - echo "Error: Failed to calculate file age in hours for resource $resource_id" - exit 1 - fi - echo "resource $resource_id is $file_age_hours hours old" +# Function to process the destruction for a specific resource type +process_resources_in_order() { + local resources=("$@") # Accept an array of resources to process - if [ $file_age_hours -ge "$MIN_AGE_IN_HOURS" ]; then - # name of the cluster is always after terraform/ - cluster_name=$(echo "$resource_id" | cut -d'/' -f2) - echo "Destroying resource $resource_id in $terraform_module (cluster_name=$cluster_name)" + for resource_id in "${resources[@]}"; do + cd "$CURRENT_DIR" || return 1 - if ! 
destroy_resource "$resource_id" "$terraform_module" "$cluster_name"; then - echo "Error destroying resource $resource_id" - FAILED=1 + terraform_module=$(basename "$(dirname "$resource_id")") + echo "Checking resource $resource_id (terraform module=$terraform_module)" + + # Apply module name filter if specified + if [ "$MODULE_NAME" != "all" ] && [ "$MODULE_NAME" != "$terraform_module" ]; then + echo "Skipping resource $resource_id because it does not match the specified module name: $MODULE_NAME" + continue fi - else - echo "Skipping resource $resource_id as it does not meet the minimum age requirement of $MIN_AGE_IN_HOURS hours" - fi -done + last_modified=$(aws s3api head-object --bucket "$BUCKET" --key "$resource_id" --output json | grep LastModified | awk -F '"' '{print $4}') + if [ -z "$last_modified" ]; then + echo "Error: Failed to retrieve last modified timestamp for resource $resource_id" + exit 1 + fi + + last_modified_timestamp=$($date_command -d "$last_modified" +%s) + if [ -z "$last_modified_timestamp" ]; then + echo "Error: Failed to convert last modified timestamp to seconds since epoch for resource $resource_id" + exit 1 + fi + echo "Resource $resource_id last modification: $last_modified ($last_modified_timestamp)" + + file_age_hours=$(( ($current_timestamp - $last_modified_timestamp) / 3600 )) + if [ -z "$file_age_hours" ]; then + echo "Error: Failed to calculate file age in hours for resource $resource_id" + exit 1 + fi + echo "Resource $resource_id is $file_age_hours hours old" + + if [ $file_age_hours -ge "$MIN_AGE_IN_HOURS" ]; then + # Name of the cluster is always after terraform/ + cluster_name=$(echo "$resource_id" | cut -d'/' -f2) + echo "Destroying resource $resource_id in $terraform_module (cluster_name=$cluster_name)" + + if ! destroy_resource "$resource_id" "$terraform_module" "$cluster_name"; then + echo "Error destroying resource $resource_id" + FAILED=1 + fi + else + echo "Skipping resource $resource_id as it does not meet the minimum age requirement of $MIN_AGE_IN_HOURS hours" + fi + done +} + +# Destroy resources in the specific order: Aurora, OpenSearch, then EKS +echo "Destroying Aurora resources..." +process_resources_in_order "${aurora_resources[@]}" + +echo "Destroying OpenSearch resources..." +process_resources_in_order "${opensearch_resources[@]}" + +echo "Destroying EKS resources..." 
+process_resources_in_order "${eks_resources[@]}" echo "Cleaning up empty folders in s3://$BUCKET" # Loop until no empty folders are found diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index cc3afc1f..8103449e 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -116,6 +116,8 @@ runs: id: init working-directory: ${{ inputs.tf-modules-path }}/modules/eks-cluster/ run: | + set -euxo pipefail + cp ../fixtures/backend.tf ./ terraform version terraform init -backend-config="bucket=${{ steps.utility.outputs.TFSTATE_BUCKET }}" \ @@ -127,6 +129,8 @@ runs: id: plan working-directory: ${{ inputs.tf-modules-path }}/modules/eks-cluster/ run: | + set -euxo pipefail + echo '${{ inputs.additional-terraform-vars }}' > /tmp/var.tfvars.json terraform plan -no-color -out eks.plan \ -var-file=/tmp/var.tfvars.json \ @@ -139,6 +143,8 @@ runs: id: apply working-directory: ${{ inputs.tf-modules-path }}/modules/eks-cluster/ run: | + set -euxo pipefail + terraform apply -no-color eks.plan export cluster_endpoint="$(terraform output -raw cluster_endpoint)" echo "cluster_endpoint=$cluster_endpoint" >> "$GITHUB_OUTPUT" @@ -148,6 +154,8 @@ runs: id: fetch_outputs working-directory: ${{ inputs.tf-modules-path }}/modules/eks-cluster/ run: | + set -euxo pipefail + all_outputs=$(terraform output -json | jq -c .) echo "all_terraform_outputs=$all_outputs" | tee -a "$GITHUB_OUTPUT" @@ -156,11 +164,15 @@ runs: id: kube_config if: inputs.login == 'true' run: | + set -euxo pipefail + aws eks --region ${{ inputs.aws-region }} update-kubeconfig --name ${{ inputs.cluster-name }} - name: Output Kube Config shell: bash if: inputs.login == 'true' run: | + set -euxo pipefail + kubectl config view kubectl get ns diff --git a/.github/actions/opensearch-manage-cluster/README.md b/.github/actions/opensearch-manage-cluster/README.md new file mode 100644 index 00000000..713b962f --- /dev/null +++ b/.github/actions/opensearch-manage-cluster/README.md @@ -0,0 +1,162 @@ +# Deploy OpenSearch Domain + +## Description + +This GitHub Action automates the deployment of an OpenSearch domain using Terraform. +It will also install Terraform and awscli. It will output the OpenSearch domain endpoint. + + +## Inputs + +| name | description | required | default | +| --- | --- | --- | --- | +| `aws-region` |
AWS region where the cluster will be deployed | `true` | `""` |
+| `domain-name` | Name of the OpenSearch domain to deploy | `true` | `""` |
+| `engine-version` | Version of the OpenSearch engine to deploy | `false` | `2.15` |
+| `vpc-id` | VPC ID to create the domain in | `true` | `""` |
+| `subnet-ids` | List of subnet IDs to create the domain in | `true` | `""` |
+| `cidr-blocks` | CIDR blocks to allow access from and to | `true` | `""` |
+| `instance-type` | Instance type for the OpenSearch cluster | `false` | `t3.small.search` |
+| `instance-count` | Number of instances in the cluster | `false` | `3` |
+| `additional-terraform-vars` | JSON object containing additional Terraform variables | `false` | `{}` |
+| `s3-backend-bucket` | Name of the S3 bucket to store Terraform state | `true` | `""` |
+| `s3-bucket-region` | Region of the bucket containing the resources states | `false` | `""` |
+| `tf-modules-revision` | Git revision of the tf modules to use | `false` | `main` |
+| `tf-modules-path` | Path where the tf OpenSearch modules will be cloned | `false` | `./.action-tf-modules/opensearch/` |
+| `tf-cli-config-credentials-hostname` | The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. Defaults to `app.terraform.io`. | `false` | `app.terraform.io` |
+| `tf-cli-config-credentials-token` | The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. | `false` | `""` |
+| `tf-terraform-version` | The version of Terraform CLI to install. Defaults to `latest`. | `false` | `latest` |
+| `tf-terraform-wrapper` | Whether or not to install a wrapper to wrap subsequent calls of the `terraform` binary and expose its STDOUT, STDERR, and exit code as outputs named `stdout`, `stderr`, and `exitcode` respectively. Defaults to `true`. | `false` | `true` |
+| `awscli-version` | Version of the aws cli to use | `false` | `2.15.52` |
+
+
+## Outputs
+
+| name | description |
+| --- | --- |
+| `opensearch-endpoint` | The endpoint of the deployed OpenSearch domain |
+| `terraform-state-url` | URL of the Terraform state file in the S3 bucket |
+| `all-terraform-outputs` | All outputs from Terraform
| + + +## Runs + +This action is a `composite` action. + +## Usage + +```yaml +- uses: camunda/camunda-tf-eks-module/.github/actions/opensearch-manage-cluster@main + with: + aws-region: + # AWS region where the cluster will be deployed + # + # Required: true + # Default: "" + + domain-name: + # Name of the OpenSearch domain to deploy + # + # Required: true + # Default: "" + + engine-version: + # Version of the OpenSearch engine to deploy + # + # Required: false + # Default: 2.15 + + vpc-id: + # VPC ID to create the domain in + # + # Required: true + # Default: "" + + subnet-ids: + # List of subnet IDs to create the domain in + # + # Required: true + # Default: "" + + cidr-blocks: + # CIDR blocks to allow access from and to + # + # Required: true + # Default: "" + + instance-type: + # Instance type for the OpenSearch cluster + # + # Required: false + # Default: t3.small.search + + instance-count: + # Number of instances in the cluster + # + # Required: false + # Default: 3 + + additional-terraform-vars: + # JSON object containing additional Terraform variables + # + # Required: false + # Default: {} + + s3-backend-bucket: + # Name of the S3 bucket to store Terraform state + # + # Required: true + # Default: "" + + s3-bucket-region: + # Region of the bucket containing the resources states + # + # Required: false + # Default: "" + + tf-modules-revision: + # Git revision of the tf modules to use + # + # Required: false + # Default: main + + tf-modules-path: + # Path where the tf OpenSearch modules will be cloned + # + # Required: false + # Default: ./.action-tf-modules/opensearch/ + + tf-cli-config-credentials-hostname: + # The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block + # of the Terraform CLI configuration file. Defaults to `app.terraform.io`. + # + # Required: false + # Default: app.terraform.io + + tf-cli-config-credentials-token: + # The API token for a HCP Terraform/Terraform Enterprise instance to place + # within the credentials block of the Terraform CLI configuration file. + # + # Required: false + # Default: "" + + tf-terraform-version: + # The version of Terraform CLI to install. Defaults to `latest`. + # + # Required: false + # Default: latest + + tf-terraform-wrapper: + # Whether or not to install a wrapper to wrap subsequent calls of the `terraform` binary + # and expose its STDOUT, STDERR, and exit code + # as outputs named `stdout`, `stderr`, and `exitcode` respectively. Defaults to `true`. + # + # Required: false + # Default: true + + awscli-version: + # Version of the aws cli to use + # + # Required: false + # Default: 2.15.52 +``` diff --git a/.github/actions/opensearch-manage-cluster/action.yml b/.github/actions/opensearch-manage-cluster/action.yml new file mode 100644 index 00000000..8f03c593 --- /dev/null +++ b/.github/actions/opensearch-manage-cluster/action.yml @@ -0,0 +1,183 @@ +--- +name: Deploy OpenSearch Domain + +description: | + This GitHub Action automates the deployment of an OpenSearch domain using Terraform. + It will also install Terraform and awscli. It will output the OpenSearch domain endpoint. 
+ +inputs: + aws-region: + description: AWS region where the cluster will be deployed + required: true + + domain-name: + description: Name of the OpenSearch domain to deploy + required: true + + engine-version: + description: Version of the OpenSearch engine to deploy + # TODO: add renovate + default: '2.15' + + vpc-id: + description: VPC ID to create the domain in + required: true + + subnet-ids: + description: List of subnet IDs to create the domain in + required: true + + cidr-blocks: + description: CIDR blocks to allow access from and to + required: true + + instance-type: + description: Instance type for the OpenSearch cluster + default: t3.small.search + + instance-count: + description: Number of instances in the cluster + default: '3' + + additional-terraform-vars: + description: JSON object containing additional Terraform variables + default: '{}' + + s3-backend-bucket: + description: Name of the S3 bucket to store Terraform state + required: true + + s3-bucket-region: + description: Region of the bucket containing the resources states + required: false + + tf-modules-revision: + description: Git revision of the tf modules to use + default: main + + tf-modules-path: + description: Path where the tf OpenSearch modules will be cloned + default: ./.action-tf-modules/opensearch/ + + tf-cli-config-credentials-hostname: + description: | + The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block + of the Terraform CLI configuration file. Defaults to `app.terraform.io`. + default: app.terraform.io + + tf-cli-config-credentials-token: + description: | + The API token for a HCP Terraform/Terraform Enterprise instance to place + within the credentials block of the Terraform CLI configuration file. + required: false + + tf-terraform-version: + description: The version of Terraform CLI to install. Defaults to `latest`. + default: latest + + tf-terraform-wrapper: + description: | + Whether or not to install a wrapper to wrap subsequent calls of the `terraform` binary + and expose its STDOUT, STDERR, and exit code + as outputs named `stdout`, `stderr`, and `exitcode` respectively. Defaults to `true`. 
+ default: 'true' + + awscli-version: + description: Version of the aws cli to use + # renovate: datasource=github-releases depName=aws/aws-cli + default: 2.15.52 + +outputs: + opensearch-endpoint: + description: The endpoint of the deployed OpenSearch domain + value: ${{ steps.fetch_outputs.outputs.opensearch_endpoint }} + + terraform-state-url: + description: URL of the Terraform state file in the S3 bucket + value: ${{ steps.utility.outputs.terraform-state-url }} + + all-terraform-outputs: + description: All outputs from Terraform + value: ${{ steps.fetch_outputs.outputs.all_terraform_outputs }} + +runs: + using: composite + steps: + - name: Use Utility Actions + id: utility + uses: camunda/camunda-tf-eks-module/.github/actions/utility-action@2d49d09f14fb89eea8aa769c1e757089cc7e12bd # 2.5.1 + with: + awscli-version: ${{ inputs.awscli-version }} + terraform-version: ${{ inputs.tf-terraform-version }} + + aws-region: ${{ inputs.aws-region }} + + s3-backend-bucket: ${{ inputs.s3-backend-bucket }} + s3-bucket-region: ${{ inputs.s3-bucket-region }} + + tf-state-key: terraform/${{ inputs.domain-name }}/gha/opensearch/terraform.tfstate + + tf-cli-config-credentials-hostname: ${{ inputs.tf-cli-config-credentials-hostname }} + tf-cli-config-credentials-token: ${{ inputs.tf-cli-config-credentials-token }} + tf-terraform-wrapper: ${{ inputs.tf-terraform-wrapper }} + + - name: Checkout Repository OpenSearch modules + uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4 + with: + repository: camunda/camunda-tf-eks-module + ref: ${{ inputs.tf-modules-revision }} + path: ${{ inputs.tf-modules-path }} + fetch-depth: 0 + + - name: Terraform Init + shell: bash + id: init + working-directory: ${{ inputs.tf-modules-path }}/modules/opensearch/ + run: | + set -euxo pipefail + + cp ../fixtures/backend.tf ./ + terraform version + terraform init -backend-config="bucket=${{ steps.utility.outputs.TFSTATE_BUCKET }}" -backend-config="key=${{ steps.utility.outputs.TFSTATE_KEY }}" \ + -backend-config="region=${{ steps.utility.outputs.TFSTATE_REGION }}" + terraform validate -no-color + + - name: Terraform Plan + shell: bash + id: plan + working-directory: ${{ inputs.tf-modules-path }}/modules/opensearch/ + run: | + set -euxo pipefail + + echo '${{ inputs.additional-terraform-vars }}' > /tmp/var.tfvars.json + terraform plan -no-color -out opensearch.plan \ + -var-file=/tmp/var.tfvars.json \ + -var "domain_name=${{ inputs.domain-name }}" \ + -var "engine_version=${{ inputs.engine-version }}" \ + -var 'subnet_ids=${{ inputs.subnet-ids }}' \ + -var "vpc_id=${{ inputs.vpc-id }}" \ + -var 'cidr_blocks=${{ inputs.cidr-blocks }}' \ + -var "instance_type=${{ inputs.instance-type }}" \ + -var "instance_count=${{ inputs.instance-count }}" + + - name: Terraform Apply + shell: bash + id: apply + working-directory: ${{ inputs.tf-modules-path }}/modules/opensearch/ + run: | + set -euxo pipefail + + terraform apply -no-color opensearch.plan + + - name: Fetch Terraform Outputs + shell: bash + id: fetch_outputs + working-directory: ${{ inputs.tf-modules-path }}/modules/opensearch/ + run: | + set -euxo pipefail + + export opensearch_endpoint="$(terraform output -raw opensearch_domain_endpoint)" + echo "opensearch_endpoint=$opensearch_endpoint" >> "$GITHUB_OUTPUT" + + all_outputs=$(terraform output -json | jq -c .) 
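+        # jq -c compacts the JSON to a single line so it can be stored as one $GITHUB_OUTPUT entry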
+ echo "all_terraform_outputs=$all_outputs" | tee -a "$GITHUB_OUTPUT" diff --git a/.github/actions/utility-action/action.yml b/.github/actions/utility-action/action.yml index fab4c90f..1da4c276 100644 --- a/.github/actions/utility-action/action.yml +++ b/.github/actions/utility-action/action.yml @@ -79,6 +79,8 @@ runs: - name: Install AWS CLI shell: bash run: | + set -euxo pipefail + if ! command -v aws &> /dev/null; then echo "AWS CLI not found, installing..." curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-${{ inputs.awscli-version }}.zip" -o "awscliv2.zip" @@ -93,6 +95,8 @@ runs: shell: bash id: set-terraform-variables run: | + set -euxo pipefail + export TFSTATE_BUCKET="${{ inputs.s3-backend-bucket }}" export TFSTATE_KEY="${{ inputs.tf-state-key }}" @@ -113,6 +117,8 @@ runs: id: create-s3-bucket shell: bash run: | + set -euxo pipefail + if aws s3api head-bucket --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} 2>/dev/null; then echo "Bucket already exists" else diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 98724737..1f69245a 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -9,5 +9,5 @@ on: jobs: lint: - uses: camunda/infraex-common-config/.github/workflows/lint-global.yml@7e7ae626961c7766d7612620374b7c4944f420db # 1.2.3 + uses: camunda/infraex-common-config/.github/workflows/lint-global.yml@1b6af8e7117e4e9bdf777911b7a724879b59fcfe # 1.2.4 secrets: inherit diff --git a/.github/workflows/renovate-automerge.yml b/.github/workflows/renovate-automerge.yml index a42f5c0a..5b6f2173 100644 --- a/.github/workflows/renovate-automerge.yml +++ b/.github/workflows/renovate-automerge.yml @@ -11,5 +11,5 @@ concurrency: jobs: renovate-automerge: - uses: camunda/infraex-common-config/.github/workflows/automerge-global.yml@7e7ae626961c7766d7612620374b7c4944f420db # 1.2.3 + uses: camunda/infraex-common-config/.github/workflows/automerge-global.yml@1b6af8e7117e4e9bdf777911b7a724879b59fcfe # 1.2.4 secrets: inherit diff --git a/.github/workflows/test-gha-eks.yml b/.github/workflows/test-gha-eks.yml index a4926ac6..07db8a13 100644 --- a/.github/workflows/test-gha-eks.yml +++ b/.github/workflows/test-gha-eks.yml @@ -1,5 +1,5 @@ --- -name: EKS Cluster with an AuroraDB creation and destruction test +name: EKS Cluster with an AuroraDB and OpenSearch creation and destruction test on: schedule: @@ -14,6 +14,9 @@ on: create_db: description: Should the aurora db be created default: 'true' + create_opensearch: + description: Should the opensearch domain be created + default: 'true' delete_cluster: description: Whether to delete the cluster. default: 'true' @@ -25,6 +28,14 @@ on: description: Database password. required: false type: string + opensearch_username: + description: OpenSearch username. + required: false + type: string + opensearch_password: + description: OpenSearch password. 
+ required: false + type: string pull_request: # the paths should be synced with ../labeler.yml @@ -35,8 +46,8 @@ on: - modules/eks-cluster/**.tf - modules/aurora/**.tf - .tool-versions - - .github/workflows/test-gha-eks-manage-cluster.yml - - .github/actions/eks-manage-cluster/*.yml + - .github/workflows/test-gha-eks.yml + - .github/actions/*/*.yml # limit to a single execution per actor of this workflow concurrency: @@ -54,6 +65,7 @@ env: TF_STATE_BUCKET_REGION: eu-central-1 CREATE_DB: ${{ github.event.inputs.create_db || 'true' }} + CREATE_OPENSEARCH: ${{ github.event.inputs.create_opensearch || 'true' }} jobs: action-test: @@ -74,7 +86,7 @@ jobs: if [[ -n "${{ inputs.cluster_name }}" ]]; then cluster_name="${{ inputs.cluster_name }}" else - cluster_name="cl-$(git rev-parse --short HEAD)" + cluster_name="cl-$(git rev-parse --short HEAD)-t" fi echo "cluster_name=$cluster_name" | tee -a "$GITHUB_OUTPUT" @@ -92,6 +104,20 @@ jobs: fi echo "db_password=$db_password" | tee -a "$GITHUB_OUTPUT" + if [[ -n "${{ inputs.opensearch_username }}" ]]; then + opensearch_username="${{ inputs.opensearch_username }}" + else + opensearch_username="user$(openssl rand -hex 4 | tr -d '/@" ')" + fi + echo "opensearch_username=$opensearch_username" | tee -a "$GITHUB_OUTPUT" + + if [[ -n "${{ inputs.opensearch_password }}" ]]; then + opensearch_password="${{ inputs.opensearch_password }}" + else + opensearch_password="$(openssl rand -base64 12 | tr -d '/@" ')" + fi + echo "opensearch_password=$opensearch_password" | tee -a "$GITHUB_OUTPUT" + # Get the current commit hash for the modules revision tf_modules_revision=$(git rev-parse HEAD) echo "tf_modules_revision=$tf_modules_revision" | tee -a "$GITHUB_OUTPUT" @@ -129,8 +155,8 @@ jobs: s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} - - name: Prepare Aurora Cluster - id: prepare_aurora_cluster + - name: After EKS creation infos + id: after_cluster_creation_infos run: | vpc_id=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -c -r '.vpc_id.value') echo "vpc_id=$vpc_id" | tee -a "$GITHUB_OUTPUT" @@ -153,19 +179,43 @@ jobs: cluster-name: ${{ steps.commit_info.outputs.cluster_name }} username: ${{ steps.commit_info.outputs.db_username }} password: ${{ steps.commit_info.outputs.db_password }} + aws-region: ${{ env.AWS_REGION }} s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} - vpc-id: ${{ steps.prepare_aurora_cluster.outputs.vpc_id }} - subnet-ids: ${{ steps.prepare_aurora_cluster.outputs.private_subnet_ids }} - cidr-blocks: ${{ steps.prepare_aurora_cluster.outputs.private_vpc_cidr_blocks }} + vpc-id: ${{ steps.after_cluster_creation_infos.outputs.vpc_id }} + subnet-ids: ${{ steps.after_cluster_creation_infos.outputs.private_subnet_ids }} + cidr-blocks: ${{ steps.after_cluster_creation_infos.outputs.private_vpc_cidr_blocks }} + + availability-zones: ${{ steps.after_cluster_creation_infos.outputs.availability_zones }} + + - name: Deploy OpenSearch Domain + uses: ./.github/actions/opensearch-manage-cluster + id: deploy_opensearch_domain + if: env.CREATE_OPENSEARCH == 'true' + with: + domain-name: ${{ steps.commit_info.outputs.cluster_name }}-opensearch + aws-region: ${{ env.AWS_REGION }} + + vpc-id: ${{ steps.after_cluster_creation_infos.outputs.vpc_id }} + subnet-ids: ${{ steps.after_cluster_creation_infos.outputs.private_subnet_ids }} + 
cidr-blocks: ${{ steps.after_cluster_creation_infos.outputs.private_vpc_cidr_blocks }}
+
+        additional-terraform-vars: |
+          {
+            "advanced_security_master_user_name": "${{ steps.commit_info.outputs.opensearch_username }}",
+            "advanced_security_master_user_password": "${{ steps.commit_info.outputs.opensearch_password }}",
+            "advanced_security_internal_user_database_enabled": true
+          }

-        availability-zones: ${{ steps.prepare_aurora_cluster.outputs.availability_zones }}
+        s3-backend-bucket: ${{ env.TF_STATE_BUCKET }}
+        s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }}
+        tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }}

-    - name: Delete Clusters
-      timeout-minutes: 60
+    - name: Delete Resources
+      timeout-minutes: 120
       if: always() && !(github.event_name == 'workflow_dispatch' && inputs.delete_cluster == 'false')
       uses: ./.github/actions/eks-cleanup-resources
       with:
diff --git a/README.md b/README.md
index 6033a774..6b442674 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 [![tests](https://github.com/camunda/camunda-tf-eks-module/actions/workflows/tests.yml/badge.svg?branch=main)](https://github.com/camunda/camunda-tf-eks-module/actions/workflows/tests.yml)
 [![License](https://img.shields.io/github/license/camunda/camunda-tf-eks-module)](LICENSE)

-Terraform module which creates AWS EKS (Kubernetes) resources with an opinionated configuration targeting Camunda 8 and an AWS Aurora RDS cluster.
+Terraform module which creates AWS EKS (Kubernetes) resources with an opinionated configuration targeting Camunda 8, an AWS Aurora RDS cluster and an OpenSearch domain.

 **⚠️ Warning:** This project is not intended for production use but rather for demonstration purposes only. There are no guarantees or warranties provided.

@@ -15,9 +15,9 @@ Consider installing Camunda 8 via [this guide](https://docs.camunda.io/docs/next

 ## Usage

-Below is a simple example configuration for deploying both an EKS cluster and an Aurora PostgreSQL database.
+Below is a simple example configuration for deploying an EKS cluster, an Aurora PostgreSQL database and an OpenSearch domain.

-See [AWS EKS Cluster inputs](./modules/eks-cluster/README.md#inputs) and [AWS Aurora RDS inputs](./modules/aurora/README.md#inputs) for further configuration options and how they affect the cluster and database creation.
+See [AWS EKS Cluster inputs](./modules/eks-cluster/README.md#inputs), [AWS Aurora RDS inputs](./modules/aurora/README.md#inputs) and [AWS OpenSearch inputs](./modules/opensearch/README.md#inputs) for further configuration options and how they affect the cluster and database creation.
 ```hcl
 module "eks_cluster" {
@@ -50,13 +50,215 @@ module "postgresql" {
 }
 ```

+```hcl
+
+module "opensearch_domain" {
+  source = "github.com/camunda/camunda-tf-eks-module/modules/opensearch"
+
+  domain_name        = "my-opensearch-domain"
+  subnet_ids         = module.eks_cluster.private_subnet_ids
+  security_group_ids = module.eks_cluster.security_group_ids
+  vpc_id             = module.eks_cluster.vpc_id
+  cidr_blocks        = concat(module.eks_cluster.private_vpc_cidr_blocks, module.eks_cluster.public_vpc_cidr_blocks)
+
+  instance_type   = "t3.small.search"
+  instance_count  = 3
+  ebs_volume_size = 100
+
+  advanced_security_enabled                        = true
+  advanced_security_internal_user_database_enabled = true
+  advanced_security_master_user_name               = "admin"
+  advanced_security_master_user_password           = "password"
+
+  depends_on = [module.eks_cluster]
+}
+```
+
 #### GitHub Actions

-You can automate the deployment and deletion of the EKS cluster and Aurora database using GitHub Actions. Below are examples of GitHub Actions workflows for deploying and deleting these resources.
+You can automate the deployment and deletion of the EKS cluster and Aurora database using GitHub Actions.
+
+**Note:** This is recommended only for development and testing purposes, not for production use.
+
+Below are examples of GitHub Actions workflows for deploying and deleting these resources.
+
+For more details, refer to the corresponding [EKS Actions README](./.github/actions/eks-manage-cluster/README.md), [Aurora Actions README](./.github/actions/aurora-manage-cluster/README.md), [OpenSearch Actions README](./.github/actions/opensearch-manage-cluster/README.md) and [Cleanup Actions README](./.github/actions/eks-cleanup-resources/README.md).
+
+An example workflow can be found [here](./.github/workflows/test-gha-eks.yml).
+
+#### Advanced usage with IRSA
+
+This documentation provides a step-by-step guide to creating an EKS cluster, an Aurora RDS instance, and an OpenSearch domain with IRSA (IAM Roles for Service Accounts) roles using Terraform modules. The modules create the necessary IAM roles and policies for Aurora and OpenSearch. To simplify the configuration, the modules use the outputs of the EKS cluster module to define the IRSA roles and policies.
+
+For further details and a more in-depth configuration, it is recommended to refer to the official documentation at:
+- [Amazon EKS Terraform setup](https://docs.camunda.io/docs/self-managed/setup/deploy/amazon/amazon-eks/eks-terraform/)
+- [IRSA roles setup](https://docs.camunda.io/docs/self-managed/setup/deploy/amazon/amazon-eks/irsa/)
+
+
+### Aurora IRSA Role and Policy
+
+The Aurora module uses the following outputs from the EKS cluster module to define the IRSA role and policy:
+
+- `module.eks_cluster.oidc_provider_arn`: The ARN of the OIDC provider for the EKS cluster.
+- `module.eks_cluster.oidc_provider_id`: The ID of the OIDC provider for the EKS cluster.
+- `var.account_id`: Your AWS account ID
+- `var.aurora_cluster_name`: The name of the Aurora cluster to access
+- `var.aurora_irsa_username`: The username used to access AuroraDB. This username is different from the superuser. The user must also be created manually in the database to enable the IRSA connection, as described in [the steps below](#create-irsa-user-on-the-database).
+- `var.aurora_namespace`: The Kubernetes namespace to allow access
+- `var.aurora_service_account`: The Kubernetes ServiceAccount to allow access
+
+You need to define the IAM role trust policy and access policy for Aurora.
Here's an example of how to define these policies using the outputs of the EKS cluster module: + +```hcl +module "postgresql" { + # ... + iam_aurora_access_policy = < + annotations: + eks.amazonaws.com/role-arn: :role/AuroraRole> +``` +You can retrieve the role ARN from the module output: `aurora_role_arn`. + +**OpenSearch Service Account** + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: opensearch-service-account + namespace: + annotations: + eks.amazonaws.com/role-arn: :role/OpenSearchRole> +``` +You can retrieve the role ARN from the module output: `opensearch_role_arn`. ## Support diff --git a/examples/camunda-8.6-irsa/opensearch.tf b/examples/camunda-8.6-irsa/opensearch.tf new file mode 100644 index 00000000..c4430637 --- /dev/null +++ b/examples/camunda-8.6-irsa/opensearch.tf @@ -0,0 +1,78 @@ +locals { + opensearch_domain_name = "domain-name-opensearch" # Replace "domain-name" with your domain name + + # IRSA configuration + camunda_namespace = "camunda" # Replace with your Kubernetes namespace that will host C8 Platform + camunda_zeebe_service_account = "zeebe-sa" # Replace with your Kubernetes ServiceAcccount that will be created for Zeebe + camunda_operate_service_account = "operate-sa" # Replace with your Kubernetes ServiceAcccount that will be created for Operate + camunda_tasklist_service_account = "tasklist-sa" # Replace with your Kubernetes ServiceAcccount that will be created for TaskList + camunda_optimize_service_account = "optimize-sa" # Replace with your Kubernetes ServiceAcccount that will be created for Optimize +} + +module "opensearch_domain" { + source = "git::https://github.com/camunda/camunda-tf-eks-module//modules/opensearch?ref=2.6.0" + domain_name = locals.opensearch_domain_name + engine_version = "2.15" + + instance_type = "t3.medium.search" + instance_count = 3 + ebs_volume_size = 50 + + subnet_ids = module.eks_cluster.private_subnet_ids + security_group_ids = module.eks_cluster.security_group_ids + vpc_id = module.eks_cluster.vpc_id + cidr_blocks = concat(module.eks_cluster.private_vpc_cidr_blocks, module.eks_cluster.public_vpc_cidr_blocks) + + advanced_security_enabled = true + advanced_security_internal_user_database_enabled = true + + # Supply your own secret values + advanced_security_master_user_name = "secret_user" + advanced_security_master_user_password = "secretvalue%23" + + depends_on = [module.eks_cluster] + + # IRSA configuration + iam_create_opensearch_role = true + iam_opensearch_role_name = "OpenSearchRole-${locals.opensearch_domain_name}" # Ensure uniqueness + + iam_opensearch_access_policy = < [default\_database\_name](#input\_default\_database\_name) | The name for the automatically created database on cluster creation. | `string` | `"camunda"` | no | | [engine](#input\_engine) | The engine type e.g. aurora, aurora-mysql, aurora-postgresql, ... | `string` | `"aurora-postgresql"` | no | | [engine\_version](#input\_engine\_version) | The DB engine version for Postgres to use. 
| `string` | `"15.4"` | no | +| [iam\_aurora\_access\_policy](#input\_iam\_aurora\_access\_policy) | Access policy for Aurora allowing access | `string` | `" {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"rds-db:connect\"\n ],\n \"Resource\": \"arn:aws:rds-db:::dbuser:/\"\n }\n ]\n }\n\n"` | no | +| [iam\_aurora\_role\_name](#input\_iam\_aurora\_role\_name) | Name of the AuroraRole IAM role | `string` | `"AuroraRole"` | no | | [iam\_auth\_enabled](#input\_iam\_auth\_enabled) | Determines whether IAM auth should be activated for IRSA usage | `bool` | `false` | no | +| [iam\_create\_aurora\_role](#input\_iam\_create\_aurora\_role) | Flag to determine if the Aurora IAM role should be created, if true, this module will create a role. Please ensure that iam\_auth\_enabled is set to `true` | `bool` | `false` | no | +| [iam\_role\_trust\_policy](#input\_iam\_role\_trust\_policy) | Assume role trust policy for Aurora role | `string` | `" {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks..amazonaws.com/id/:sub\": \"system:serviceaccount::\"\n }\n }\n }\n ]\n }\n\n"` | no | | [iam\_roles](#input\_iam\_roles) | Allows propagating additional IAM roles to the Aurora cluster to allow e.g. access to S3 | `list(string)` | `[]` | no | | [instance\_class](#input\_instance\_class) | The instance type of the Aurora instances | `string` | `"db.t3.medium"` | no | | [num\_instances](#input\_num\_instances) | Number of instances | `string` | `"1"` | no | @@ -68,4 +75,7 @@ No modules. | Name | Description | |------|-------------| | [aurora\_endpoint](#output\_aurora\_endpoint) | The endpoint of the Aurora cluster | +| [aurora\_policy\_arn](#output\_aurora\_policy\_arn) | The ARN of the aurora access policy | +| [aurora\_role\_arn](#output\_aurora\_role\_arn) | The ARN of the aurora IAM role | +| [aurora\_role\_name](#output\_aurora\_role\_name) | The name of the aurora IAM role | diff --git a/modules/aurora/backup.tf b/modules/aurora/backup.tf new file mode 100644 index 00000000..5f1ea5e0 --- /dev/null +++ b/modules/aurora/backup.tf @@ -0,0 +1 @@ +# TODO: add backup diff --git a/modules/aurora/main.tf b/modules/aurora/main.tf index 185a162b..fcd4c8e2 100644 --- a/modules/aurora/main.tf +++ b/modules/aurora/main.tf @@ -1,6 +1,5 @@ # Provision an RDS Aurora cluster suitable for operating within our VPC and VPN connectivity. -# TODO: add backup resource "aws_rds_cluster" "aurora_cluster" { cluster_identifier = var.cluster_name availability_zones = var.availability_zones @@ -11,9 +10,10 @@ resource "aws_rds_cluster" "aurora_cluster" { master_username = var.username database_name = var.default_database_name - # New: Enable IAM auth + assign iam roles iam_database_authentication_enabled = var.iam_auth_enabled - iam_roles = var.iam_roles # only needed if wanted to grant access from Aurora to e.g. S3 + + # don't assign twice the roles, otherwise you may encounter conflicts + iam_roles = var.iam_roles # only needed if wanted to grant access from Aurora to e.g. 
S3 vpc_security_group_ids = [aws_security_group.this.id] db_subnet_group_name = aws_db_subnet_group.this.name @@ -67,40 +67,6 @@ resource "aws_rds_cluster_instance" "aurora_instance" { depends_on = [aws_rds_cluster.aurora_cluster] } -resource "aws_security_group" "this" { - name = "${var.cluster_name}-allow-all-internal-access" - description = "Security group managing access to ${var.cluster_name}" - - vpc_id = var.vpc_id - - tags = var.tags -} - -resource "aws_security_group_rule" "allow_egress" { - description = "Allow outgoing traffic for the aurora db" - - type = "egress" - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = var.cidr_blocks - - security_group_id = aws_security_group.this.id - -} - -resource "aws_security_group_rule" "allow_ingress" { - description = "Allow incoming traffic for the aurora db for port 5432" - - type = "ingress" - from_port = 5432 - to_port = 5432 - protocol = "tcp" - cidr_blocks = var.cidr_blocks - - security_group_id = aws_security_group.this.id -} - resource "aws_db_subnet_group" "this" { name = var.cluster_name diff --git a/modules/aurora/monitoring.tf b/modules/aurora/monitoring.tf new file mode 100644 index 00000000..912d2f82 --- /dev/null +++ b/modules/aurora/monitoring.tf @@ -0,0 +1 @@ +# TODO: add monitoring diff --git a/modules/aurora/networking.tf b/modules/aurora/networking.tf new file mode 100644 index 00000000..523478b4 --- /dev/null +++ b/modules/aurora/networking.tf @@ -0,0 +1,33 @@ +resource "aws_security_group" "this" { + name = "${var.cluster_name}-allow-all-internal-access" + description = "Security group managing access to ${var.cluster_name}" + + vpc_id = var.vpc_id + + tags = var.tags +} + +resource "aws_security_group_rule" "allow_egress" { + description = "Allow outgoing traffic for the aurora db" + + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = var.cidr_blocks + + security_group_id = aws_security_group.this.id + +} + +resource "aws_security_group_rule" "allow_ingress" { + description = "Allow incoming traffic for the aurora db for port 5432" + + type = "ingress" + from_port = 5432 + to_port = 5432 + protocol = "tcp" + cidr_blocks = var.cidr_blocks + + security_group_id = aws_security_group.this.id +} diff --git a/modules/aurora/output.tf b/modules/aurora/output.tf index fcdf1738..0457cd94 100644 --- a/modules/aurora/output.tf +++ b/modules/aurora/output.tf @@ -2,3 +2,21 @@ output "aurora_endpoint" { value = aws_rds_cluster.aurora_cluster.endpoint description = "The endpoint of the Aurora cluster" } + +output "aurora_role_name" { + description = "The name of the aurora IAM role" + value = var.iam_create_aurora_role ? aws_iam_role.aurora_role[0].name : "" + sensitive = false +} + +output "aurora_role_arn" { + description = "The ARN of the aurora IAM role" + value = var.iam_create_aurora_role ? aws_iam_role.aurora_role[0].arn : "" + sensitive = false +} + +output "aurora_policy_arn" { + description = "The ARN of the aurora access policy" + value = var.iam_create_aurora_role ? aws_iam_policy.aurora_access_policy[0].arn : "" + sensitive = false +} diff --git a/modules/aurora/role.tf b/modules/aurora/role.tf new file mode 100644 index 00000000..1df6461a --- /dev/null +++ b/modules/aurora/role.tf @@ -0,0 +1,25 @@ +// IAM Role for Aurora +resource "aws_iam_role" "aurora_role" { + count = var.iam_create_aurora_role ? 
1 : 0 + + name = var.iam_aurora_role_name + assume_role_policy = var.iam_role_trust_policy +} + +// IAM Policy for Aurora Access +resource "aws_iam_policy" "aurora_access_policy" { + count = var.iam_create_aurora_role ? 1 : 0 + + name = "${var.iam_aurora_role_name}-access-policy" + description = "Access policy for Aurora" + + policy = var.iam_aurora_access_policy +} + +// Attach the policy to the role +resource "aws_iam_role_policy_attachment" "attach_aurora_policy" { + count = var.iam_create_aurora_role ? 1 : 0 + + role = aws_iam_role.aurora_role[0].name + policy_arn = aws_iam_policy.aurora_access_policy[0].arn +} diff --git a/modules/aurora/variables.tf b/modules/aurora/variables.tf index 565b7a5f..aaf5bc58 100644 --- a/modules/aurora/variables.tf +++ b/modules/aurora/variables.tf @@ -93,3 +93,61 @@ variable "default_database_name" { default = "camunda" description = "The name for the automatically created database on cluster creation." } + +variable "iam_create_aurora_role" { + description = "Flag to determine if the Aurora IAM role should be created, if true, this module will create a role. Please ensure that iam_auth_enabled is set to `true`" + type = bool + default = false +} + +variable "iam_aurora_role_name" { + description = "Name of the AuroraRole IAM role" + type = string + default = "AuroraRole" +} + +variable "iam_role_trust_policy" { + description = "Assume role trust policy for Aurora role" + type = string + default = <:oidc-provider/oidc.eks..amazonaws.com/id/" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "oidc.eks..amazonaws.com/id/:sub": "system:serviceaccount::" + } + } + } + ] + } + +EOF +} + +variable "iam_aurora_access_policy" { + # see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.IAMPolicy.html + description = "Access policy for Aurora allowing access" + type = string + default = <::dbuser:/" + } + ] + } + +EOF +} diff --git a/modules/eks-cluster/README.md b/modules/eks-cluster/README.md index a060418a..c003f501 100644 --- a/modules/eks-cluster/README.md +++ b/modules/eks-cluster/README.md @@ -53,6 +53,7 @@ module "eks_cluster" { | [authentication\_mode](#input\_authentication\_mode) | The authentication mode for the cluster. | `string` | `"API"` | no | | [cluster\_node\_ipv4\_cidr](#input\_cluster\_node\_ipv4\_cidr) | The CIDR block for public and private subnets of loadbalancers and nodes. Between /28 and /16. | `string` | `"10.192.0.0/16"` | no | | [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. Between /24 and /12. | `string` | `"10.190.0.0/16"` | no | +| [cluster\_tags](#input\_cluster\_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no | | [enable\_cluster\_creator\_admin\_permissions](#input\_enable\_cluster\_creator\_admin\_permissions) | Indicates whether or not to add the cluster creator (the identity used by Terraform) as an administrator via access entry. | `bool` | `true` | no | | [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to be used by EKS | `string` | `"1.30"` | no | | [name](#input\_name) | Name being used for relevant resources - including EKS cluster name | `string` | n/a | yes | @@ -61,6 +62,7 @@ module "eks_cluster" { | [np\_desired\_node\_count](#input\_np\_desired\_node\_count) | Actual number of nodes for the default node pool. 
Min-Max will be used for autoscaling | `number` | `4` | no | | [np\_disk\_size](#input\_np\_disk\_size) | Disk size of the nodes on the default node pool | `number` | `20` | no | | [np\_instance\_types](#input\_np\_instance\_types) | Allow passing a list of instance types for the auto scaler to select from when scaling the default node pool | `list(string)` |
<pre>[ "m6i.xlarge" ]</pre>
| no | +| [np\_labels](#input\_np\_labels) | A map of labels to add to the default pool nodes | `map(string)` | `{}` | no | | [np\_max\_node\_count](#input\_np\_max\_node\_count) | Maximum number of nodes for the default node pool | `number` | `10` | no | | [np\_min\_node\_count](#input\_np\_min\_node\_count) | Minimum number of nodes for the default node pool | `number` | `1` | no | | [region](#input\_region) | The region where the cluster and relevant resources should be deployed in | `string` | n/a | yes | @@ -69,6 +71,7 @@ module "eks_cluster" { | Name | Description | |------|-------------| | [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes | +| [aws\_caller\_identity\_account\_id](#output\_aws\_caller\_identity\_account\_id) | Account ID of the current AWS account | | [cert\_manager\_arn](#output\_cert\_manager\_arn) | Amazon Resource Name of the cert-manager IAM role used for IAM Roles to Service Accounts mappings | | [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server | | [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | @@ -80,6 +83,7 @@ module "eks_cluster" { | [ebs\_cs\_arn](#output\_ebs\_cs\_arn) | Amazon Resource Name of the ebs-csi IAM role used for IAM Roles to Service Accounts mappings | | [external\_dns\_arn](#output\_external\_dns\_arn) | Amazon Resource Name of the external-dns IAM role used for IAM Roles to Service Accounts mappings | | [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | Amazon Resource Name of the OIDC provider for the EKS cluster. Allows to add additional IRSA mappings | +| [oidc\_provider\_id](#output\_oidc\_provider\_id) | OIDC provider for the EKS cluster. Allows to add additional IRSA mappings | | [private\_route\_table\_ids](#output\_private\_route\_table\_ids) | The IDs of the private route tables associated with this VPC | | [private\_subnet\_ids](#output\_private\_subnet\_ids) | Private subnet IDs | | [private\_vpc\_cidr\_blocks](#output\_private\_vpc\_cidr\_blocks) | Private VPC CIDR blocks | diff --git a/modules/eks-cluster/backup.tf b/modules/eks-cluster/backup.tf new file mode 100644 index 00000000..5f1ea5e0 --- /dev/null +++ b/modules/eks-cluster/backup.tf @@ -0,0 +1 @@ +# TODO: add backup diff --git a/modules/eks-cluster/cluster.tf b/modules/eks-cluster/cluster.tf index d356931c..06eb9b9e 100644 --- a/modules/eks-cluster/cluster.tf +++ b/modules/eks-cluster/cluster.tf @@ -12,6 +12,8 @@ module "eks" { cluster_endpoint_private_access = true # private API communication for nodes within the VPC cluster_endpoint_public_access = true # API accessible to engineers + cluster_tags = var.cluster_tags + cluster_addons = { coredns = { most_recent = true @@ -84,6 +86,8 @@ module "eks" { instance_types = var.np_instance_types capacity_type = var.np_capacity_type + labels = var.np_labels + update_config = { max_unavailable = 1 } @@ -103,9 +107,9 @@ module "eks" { # EKS Managed Node Group definitions eks_managed_node_groups = { services = { - labels = {} name = "services" use_name_prefix = false + labels = var.np_labels } } diff --git a/modules/eks-cluster/monitoring.tf b/modules/eks-cluster/monitoring.tf new file mode 100644 index 00000000..912d2f82 --- /dev/null +++ b/modules/eks-cluster/monitoring.tf @@ -0,0 +1 @@ +# TODO: add monitoring diff --git a/modules/eks-cluster/outputs.tf b/modules/eks-cluster/outputs.tf index 97e138a5..164473ab 100644 --- a/modules/eks-cluster/outputs.tf +++ b/modules/eks-cluster/outputs.tf @@ 
-71,6 +71,16 @@ output "oidc_provider_arn" { description = "Amazon Resource Name of the OIDC provider for the EKS cluster. Allows to add additional IRSA mappings" } +output "aws_caller_identity_account_id" { + value = data.aws_caller_identity.current.account_id + description = "Account ID of the current AWS account" +} + +output "oidc_provider_id" { + value = replace(module.eks.oidc_provider_arn, "arn:aws:iam::${data.aws_caller_identity.current.account_id}:oidc-provider/", "") + description = "OIDC provider for the EKS cluster. Allows to add additional IRSA mappings" +} + ################################################################################ # VPC ################################################################################ diff --git a/modules/eks-cluster/variables.tf b/modules/eks-cluster/variables.tf index 5c23ab90..7609e0ff 100644 --- a/modules/eks-cluster/variables.tf +++ b/modules/eks-cluster/variables.tf @@ -35,6 +35,18 @@ variable "np_desired_node_count" { default = 4 } +variable "np_labels" { + type = map(string) + description = "A map of labels to add to the default pool nodes" + default = {} +} + +variable "cluster_tags" { + type = map(string) + description = "A map of additional tags to add to the cluster" + default = {} +} + variable "np_instance_types" { type = list(string) description = "Allow passing a list of instance types for the auto scaler to select from when scaling the default node pool" diff --git a/modules/fixtures/fixtures.default.aurora.tfvars b/modules/fixtures/fixtures.default.aurora.tfvars index e69de29b..5f920138 100644 --- a/modules/fixtures/fixtures.default.aurora.tfvars +++ b/modules/fixtures/fixtures.default.aurora.tfvars @@ -0,0 +1,3 @@ +tags = { + Environment = "tests" +} diff --git a/modules/fixtures/fixtures.default.eks.tfvars b/modules/fixtures/fixtures.default.eks.tfvars index c11b59b1..2220f06a 100644 --- a/modules/fixtures/fixtures.default.eks.tfvars +++ b/modules/fixtures/fixtures.default.eks.tfvars @@ -4,3 +4,9 @@ np_instance_types = ["t2.medium"] # spot instances are cheaper with same performances for non production environments np_capacity_type = "SPOT" +cluster_tags = { + Environment = "tests" +} +np_labels = { + Environment = "tests" +} diff --git a/modules/fixtures/fixtures.default.opensearch.tfvars b/modules/fixtures/fixtures.default.opensearch.tfvars new file mode 100644 index 00000000..5f920138 --- /dev/null +++ b/modules/fixtures/fixtures.default.opensearch.tfvars @@ -0,0 +1,3 @@ +tags = { + Environment = "tests" +} diff --git a/modules/fixtures/opensearch-client.yml b/modules/fixtures/opensearch-client.yml new file mode 100644 index 00000000..3dcc8a39 --- /dev/null +++ b/modules/fixtures/opensearch-client.yml @@ -0,0 +1,86 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: opensearch-client + labels: + app: opensearch-client +spec: + backoffLimit: 0 + template: + spec: + serviceAccountName: opensearch-access-sa + restartPolicy: Never + containers: + - name: opensearch-client + image: amazonlinux:latest + command: + - sh + - -c + - | + /bin/bash <<'EOF' + set -euxo pipefail + + echo "Installing dependencies..." + yum install -y unzip awscli-2 + + echo "Installing OpenSearch CLI..." 
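+              # the CLI release is pinned to v1.2.0 in the URL below; bump it deliberately when upgrading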
+ curl -L "https://github.com/opensearch-project/opensearch-cli/releases/download/v1.2.0/opensearch-cli-1.2.0-linux-x64.zip" -o "opensearch.zip" + unzip opensearch.zip + + # Create or replace the ~/.aws/config file with the values from the environment variables + mkdir -p ~/.aws + cat < ~/.aws/config + [profile opensearch] + role_arn = "$AWS_ROLE_ARN" + web_identity_token_file = "$AWS_WEB_IDENTITY_TOKEN_FILE" + EOCONFIG + chmod 0600 ~/.aws/config + + echo "AWS IRSA profile configured:" + cat ~/.aws/config + + # Attempt unauthenticated access to the OpenSearch cluster, expecting a failure + if curl -s -o /dev/null -w "%{http_code}" https://$OPENSEARCH_ENDPOINT/_cluster/health | grep -q '403'; then + echo "Unauthenticated access failed as expected." + else + echo "Unauthenticated access did not fail as expected, check the configuration." + exit 1 + fi + + echo "Testing OpenSearch connection using IRSA..." + + # Create or replace the /root/.opensearch-cli/config.yaml file with the values from the environment variables + mkdir -p ~/.opensearch-cli + cat < ~/.opensearch-cli/config.yaml + profiles: + - name: opensearch + endpoint: https://$OPENSEARCH_ENDPOINT + aws_iam: + profile: opensearch + service: es + max_retry: 3 + timeout: 10 + EOCONFIG_OPENSEARCH + chmod 0600 ~/.opensearch-cli/config.yaml + + echo "OpenSearch CLI profile configured:" + cat ~/.opensearch-cli/config.yaml + + # Test OpenSearch connection using the opensearch profile + ./opensearch-cli curl get --path _cluster/health --profile opensearch + ./opensearch-cli curl put --path /my_index --profile opensearch + ./opensearch-cli curl get --path /my_index --profile opensearch + + EOF + env: + - name: OPENSEARCH_ENDPOINT + valueFrom: + configMapKeyRef: + name: opensearch-config + key: opensearch_endpoint + - name: AWS_REGION + valueFrom: + configMapKeyRef: + name: opensearch-config + key: aws_region diff --git a/modules/fixtures/postgres-client-irsa.yml b/modules/fixtures/postgres-client-irsa.yml deleted file mode 100644 index 664393f8..00000000 --- a/modules/fixtures/postgres-client-irsa.yml +++ /dev/null @@ -1,80 +0,0 @@ ---- -# this manifest contains a version with the IRSA connection check, it is currently listed as a TODO -# it may be implemented or dropped depending on if it's relevant or not to test IRSA connection for the db -apiVersion: batch/v1 -kind: Job -metadata: - name: postgres-client - labels: - app: postgres-client -spec: - backoffLimit: 0 - template: - spec: - restartPolicy: Never - containers: - - name: postgres-client - image: ubuntu:latest - command: - - sh - - -c - - | - /bin/bash <<'EOF' - set -o pipefail && \ - apt-get update && \ - apt-get install -y python3 python3-pip build-essential postgresql-client && \ - echo "Creating IRSA db user" && \ - mkdir -p /tmp/scripts && \ - cp /scripts/create_aurora_pg_db.sh /tmp/scripts/create_aurora_pg_db.sh && \ - chmod +x /tmp/scripts/create_aurora_pg_db.sh && /tmp/scripts/create_aurora_pg_db.sh && \ - echo "Testing connection using IRSA" && \ - python3 -m pip install awscli && \ - AWS_PG_PASSWORD=$(aws rds generate-db-auth-token --hostname $AURORA_ENDPOINT --port $AURORA_PORT \ - --region $AWS_REGION --username $AURORA_USERNAME_IRSA) && \ - psql -h $AURORA_ENDPOINT -p $AURORA_PORT "dbname=$AURORA_DB_NAME user=$AURORA_USERNAME_IRSA password=$AWS_PG_PASSWORD" -c 'SELECT version();' - - EOF - volumeMounts: - - name: scripts - mountPath: /scripts - readOnly: true - env: - - name: AURORA_ENDPOINT - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_endpoint - - 
name: AURORA_USERNAME - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_username - - name: AURORA_USERNAME_IRSA - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_username_irsa - - name: AURORA_PASSWORD - valueFrom: - secretKeyRef: - name: aurora-secret - key: aurora_password - - name: AURORA_PORT - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_port - - name: AWS_REGION - valueFrom: - configMapKeyRef: - name: aurora-config - key: aws_region - - name: AURORA_DB_NAME - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_db_name - volumes: - - name: scripts - configMap: - name: postgres-scripts diff --git a/modules/fixtures/postgres-client.yml b/modules/fixtures/postgres-client.yml index 213fdd3e..2096c53c 100644 --- a/modules/fixtures/postgres-client.yml +++ b/modules/fixtures/postgres-client.yml @@ -1,4 +1,5 @@ --- +# this manifest contains a version with the IRSA connection check apiVersion: batch/v1 kind: Job metadata: @@ -9,28 +10,48 @@ spec: backoffLimit: 0 template: spec: + serviceAccountName: aurora-access-sa restartPolicy: Never containers: - name: postgres-client - image: ubuntu:latest + image: amazonlinux:latest command: - sh - -c - | /bin/bash <<'EOF' - set -o pipefail && \ - apt-get update && \ - apt-get install -y python3 python3-pip build-essential postgresql-client && \ - echo "Creating IRSA db user" && \ - mkdir -p /tmp/scripts && \ - cp /scripts/create_aurora_pg_db.sh /tmp/scripts/create_aurora_pg_db.sh && \ - chmod +x /tmp/scripts/create_aurora_pg_db.sh && /tmp/scripts/create_aurora_pg_db.sh + set -o pipefail + + echo "Installing dependencies..." + yum install -y curl postgresql15 unzip awscli-2 + + echo "Creating IRSA db user using admin user" + psql -h $AURORA_ENDPOINT -p $AURORA_PORT "sslmode=require dbname=$AURORA_DB_NAME user=$AURORA_USERNAME password=$AURORA_PASSWORD" \ + -c "CREATE USER \"${AURORA_USERNAME_IRSA}\" WITH LOGIN;" \ + -c "GRANT rds_iam TO \"${AURORA_USERNAME_IRSA}\";" \ + -c "GRANT rds_superuser TO \"${AURORA_USERNAME_IRSA}\";" \ + -c "GRANT ALL PRIVILEGES ON DATABASE \"${AURORA_DB_NAME}\" TO \"${AURORA_USERNAME_IRSA}\";" \ + -c "SELECT aurora_version();" \ + -c "SELECT version();" -c "\du" + + # Attempt unauthenticated access to the Aurora PostgreSQL database, expecting a failure + if ! psql -h "$AURORA_ENDPOINT" \ + -p "$AURORA_PORT" \ + "sslmode=require dbname=$AURORA_DB_NAME user=$AURORA_USERNAME_IRSA password=$AWS_PG_PASSWORD" \ + -c 'SELECT version();' 2>/dev/null; then + echo "Unauthenticated access failed as expected." + else + echo "Unauthenticated access did not fail as expected, check the configuration." 
+ exit 1 + fi + + echo "Testing connection using IRSA" + export AWS_PG_PASSWORD=$(aws rds generate-db-auth-token --hostname $AURORA_ENDPOINT --port $AURORA_PORT \ + --region $AWS_REGION --username $AURORA_USERNAME_IRSA) + psql -h $AURORA_ENDPOINT -p $AURORA_PORT "sslmode=require dbname=$AURORA_DB_NAME user=$AURORA_USERNAME_IRSA password=$AWS_PG_PASSWORD" \ + -c 'SELECT version();' EOF - volumeMounts: - - name: scripts - mountPath: /scripts - readOnly: true env: - name: AURORA_ENDPOINT valueFrom: @@ -67,7 +88,3 @@ spec: configMapKeyRef: name: aurora-config key: aurora_db_name - volumes: - - name: scripts - configMap: - name: postgres-scripts diff --git a/modules/fixtures/scripts/create_aurora_pg_db.sh b/modules/fixtures/scripts/create_aurora_pg_db.sh deleted file mode 100755 index beb67b16..00000000 --- a/modules/fixtures/scripts/create_aurora_pg_db.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# see https://github.com/camunda/infra-core/tree/opensearch-cluster/camunda-opensearch#user-setup -psql -h $AURORA_ENDPOINT -p $AURORA_PORT "dbname=$AURORA_DB_NAME user=$AURORA_USERNAME password=$AURORA_PASSWORD" \ - -c "CREATE USER \"${AURORA_USERNAME_IRSA}\" WITH LOGIN;" \ - -c "GRANT rds_iam TO \"${AURORA_USERNAME_IRSA}\";" \ - -c "GRANT rds_superuser TO \"${AURORA_USERNAME_IRSA}\";" \ - -c "GRANT ALL PRIVILEGES ON DATABASE \"${AURORA_DB_NAME}\" TO \"${AURORA_USERNAME_IRSA}\";" \ - -c "SELECT aurora_version();" \ - -c "SELECT version();" -c "\du" diff --git a/modules/opensearch/README.md b/modules/opensearch/README.md new file mode 100644 index 00000000..f3086546 --- /dev/null +++ b/modules/opensearch/README.md @@ -0,0 +1,146 @@ +# AWS OpenSearch Domain Terraform Module + +This Terraform module creates and manages an AWS OpenSearch domain. The module is designed to be integrated with an existing EKS cluster or VPC for seamless setup and management. Below is a detailed explanation of the module's configuration options and usage. + +## Usage + +Below is a simple example configuration that demonstrates how to use this module. Adjust the values as needed for your specific setup. + +```hcl +module "opensearch_domain" { + source = "github.com/camunda/camunda-tf-eks-module/modules/opensearch" + + domain_name = "my-opensearch-domain" + engine_version = "2.15" + subnet_ids = module.eks_cluster.private_subnet_ids + security_group_ids = module.eks_cluster.security_group_ids + vpc_id = module.eks_cluster.vpc_id + cidr_blocks = concat(module.eks_cluster.private_vpc_cidr_blocks, module.eks_cluster.public_vpc_cidr_blocks) + + instance_type = "t3.small.search" + instance_count = 3 + ebs_volume_size = 100 + + advanced_security_enabled = true + advanced_security_internal_user_database_enabled = true + advanced_security_master_user_name = "admin" + advanced_security_master_user_password = "password" + + encrypt_at_rest_kms_key_id = "kms-key-id" + access_policies = < +## Modules + +No modules. 
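+To let a pod reach the domain through IRSA, the `iam_*` inputs can be filled from the outputs of the `eks-cluster` module in this repository. The snippet below is a minimal sketch rather than a drop-in configuration: it assumes the EKS module is instantiated as `module.eks_cluster`, and it reuses the illustrative `opensearch` namespace, `opensearch-access-sa` service account and `eu-central-1` region from the test fixtures.
+
+```hcl
+module "opensearch_domain" {
+  source = "github.com/camunda/camunda-tf-eks-module/modules/opensearch"
+
+  domain_name = "my-opensearch-domain"
+  subnet_ids  = module.eks_cluster.private_subnet_ids
+  vpc_id      = module.eks_cluster.vpc_id
+  cidr_blocks = concat(module.eks_cluster.private_vpc_cidr_blocks, module.eks_cluster.public_vpc_cidr_blocks)
+
+  # create an IAM role that the service account can assume via the cluster OIDC provider
+  iam_create_opensearch_role = true
+  iam_opensearch_role_name   = "OpenSearchRole"
+
+  iam_role_trust_policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [{
+      Effect = "Allow"
+      Principal = {
+        Federated = "arn:aws:iam::${module.eks_cluster.aws_caller_identity_account_id}:oidc-provider/${module.eks_cluster.oidc_provider_id}"
+      }
+      Action = "sts:AssumeRoleWithWebIdentity"
+      Condition = {
+        StringEquals = {
+          "${module.eks_cluster.oidc_provider_id}:sub" = "system:serviceaccount:opensearch:opensearch-access-sa"
+        }
+      }
+    }]
+  })
+
+  # restrict HTTP access to this domain only
+  iam_opensearch_access_policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [{
+      Effect   = "Allow"
+      Action   = ["es:ESHttpGet", "es:ESHttpPut", "es:ESHttpPost"]
+      Resource = "arn:aws:es:eu-central-1:${module.eks_cluster.aws_caller_identity_account_id}:domain/my-opensearch-domain/*"
+    }]
+  })
+}
+```
+
+The service account then only needs the usual `eks.amazonaws.com/role-arn` annotation pointing at the created role, as done in `test/src/custom_eks_opensearch_test.go`.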
+## Resources + +| Name | Type | +|------|------| +| [aws_iam_policy.opensearch_access_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.opensearch](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.attach_opensearch_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_kms_key.kms](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | +| [aws_opensearch_domain.opensearch_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/opensearch_domain) | resource | +| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_security_group_rule.allow_egress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_security_group_rule.allow_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [access\_policies](#input\_access\_policies) | IAM policy document specifying the access policies for the domain. | `string` | `"{}"` | no | +| [advanced\_options](#input\_advanced\_options) | Key-value string pairs to specify advanced configuration options. | `map(any)` |
{ "rest.action.multi.allow_explicit_index": true }
| no | +| [advanced\_security\_anonymous\_auth\_enabled](#input\_advanced\_security\_anonymous\_auth\_enabled) | Whether the anonymous auth is enabled. | `bool` | `false` | no | +| [advanced\_security\_enabled](#input\_advanced\_security\_enabled) | Whether advanced security is enabled. | `bool` | `false` | no | +| [advanced\_security\_internal\_user\_database\_enabled](#input\_advanced\_security\_internal\_user\_database\_enabled) | Whether the internal user database is enabled. | `bool` | `false` | no | +| [advanced\_security\_master\_user\_name](#input\_advanced\_security\_master\_user\_name) | Main user's username, which is stored in the Amazon Elasticsearch Service domain's internal database. Only specify if `advanced_security_internal_user_database_enabled` is set to true. | `string` | `"opensearch-admin"` | no | +| [advanced\_security\_master\_user\_password](#input\_advanced\_security\_master\_user\_password) | Main user's password, which is stored in the Amazon Elasticsearch Service domain's internal database. Only specify if `advanced_security_internal_user_database_enabled` is set to true. | `string` | `""` | no | +| [auto\_software\_update\_enabled](#input\_auto\_software\_update\_enabled) | Software update auto for the domain. | `bool` | `false` | no | +| [auto\_tune\_options](#input\_auto\_tune\_options) | Configuration block for the Auto-Tune options of the domain | `any` |
{ "desired_state": "DISABLED", "rollback_on_disable": "NO_ROLLBACK" }
| no | +| [automated\_snapshot\_start\_hour](#input\_automated\_snapshot\_start\_hour) | Hour during which the service takes an automated daily snapshot of the indices in the domain. | `number` | `0` | no | +| [cidr\_blocks](#input\_cidr\_blocks) | The CIDR blocks to allow access from and to. | `list(string)` | n/a | yes | +| [cold\_storage\_enabled](#input\_cold\_storage\_enabled) | Indicates cold storage is enabled. | `bool` | `false` | no | +| [create\_timeout](#input\_create\_timeout) | How much time to wait for the creation before timing out. | `string` | `"2h"` | no | +| [dedicated\_master\_count](#input\_dedicated\_master\_count) | Number of dedicated master nodes in the cluster. | `number` | `3` | no | +| [dedicated\_master\_enabled](#input\_dedicated\_master\_enabled) | Indicates whether dedicated master nodes are enabled for the cluster. | `bool` | `true` | no | +| [dedicated\_master\_type](#input\_dedicated\_master\_type) | Instance type of the dedicated master nodes in the cluster. | `string` | `"m4.large.search"` | no | +| [domain\_endpoint\_options](#input\_domain\_endpoint\_options) | Configuration block for domain endpoint HTTP(S) related options | `any` |
{ "enforce_https": true, "tls_security_policy": "Policy-Min-TLS-1-2-2019-07" }
| no | +| [domain\_name](#input\_domain\_name) | Name of the domain. | `string` | n/a | yes | +| [ebs\_enabled](#input\_ebs\_enabled) | Whether EBS volumes are attached to data nodes in the domain. | `bool` | `true` | no | +| [ebs\_iops](#input\_ebs\_iops) | Baseline input/output (I/O) performance of EBS volumes attached to data nodes. Applicable only for the GP3 and Provisioned IOPS EBS volume types. | `number` | `3000` | no | +| [ebs\_throughput](#input\_ebs\_throughput) | (Required if `ebs_volume_type` is set to gp3) Specifies the throughput (in MiB/s) of the EBS volumes attached to data nodes. Applicable only for the gp3 volume type. | `number` | `125` | no | +| [ebs\_volume\_size](#input\_ebs\_volume\_size) | Size of EBS volumes attached to data nodes. | `number` | `64` | no | +| [ebs\_volume\_type](#input\_ebs\_volume\_type) | Type of EBS volumes attached to data nodes. | `string` | `"gp3"` | no | +| [enable\_access\_policy](#input\_enable\_access\_policy) | Determines whether an access policy will be applied to the domain | `bool` | `true` | no | +| [engine\_version](#input\_engine\_version) | OpenSearch version for the domain. | `string` | `"2.15"` | no | +| [iam\_create\_opensearch\_role](#input\_iam\_create\_opensearch\_role) | Flag to determine if the OpenSearch role should be created | `bool` | `false` | no | +| [iam\_opensearch\_access\_policy](#input\_iam\_opensearch\_access\_policy) | Access policy for OpenSearch allowing access | `string` | `" {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"es:ESHttpGet\",\n \"es:ESHttpPut\",\n \"es:ESHttpPost\"\n ],\n \"Resource\": \"arn:aws:es:::domain//*\"\n }\n ]\n }\n\n"` | no | +| [iam\_opensearch\_role\_name](#input\_iam\_opensearch\_role\_name) | Name of the OpenSearch IAM role | `string` | `"OpenSearchRole"` | no | +| [iam\_role\_trust\_policy](#input\_iam\_role\_trust\_policy) | Assume role trust policy for OpenSearch role | `string` | `" {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks..amazonaws.com/id/:sub\": \"system:serviceaccount::\"\n }\n }\n }\n ]\n }\n\n"` | no | +| [instance\_count](#input\_instance\_count) | Number of instances in the cluster. | `number` | `3` | no | +| [instance\_type](#input\_instance\_type) | Instance type of data nodes in the cluster. | `string` | `"t3.small.search"` | no | +| [ip\_address\_type](#input\_ip\_address\_type) | The IP address type for the endpoint. Valid values are ipv4 and dualstack | `string` | `"ipv4"` | no | +| [kms\_key\_delete\_window\_in\_days](#input\_kms\_key\_delete\_window\_in\_days) | The number of days before the KMS key is deleted after being disabled. | `number` | `7` | no | +| [kms\_key\_enable\_key\_rotation](#input\_kms\_key\_enable\_key\_rotation) | Specifies whether automatic key rotation is enabled for the KMS key. | `bool` | `true` | no | +| [kms\_key\_tags](#input\_kms\_key\_tags) | The tags to associate with the KMS key. | `map(string)` | `{}` | no | +| [multi\_az\_with\_standby\_enabled](#input\_multi\_az\_with\_standby\_enabled) | Whether a multi-AZ domain is turned on with a standby AZ. | `bool` | `false` | no | +| [node\_to\_node\_encryption\_enabled](#input\_node\_to\_node\_encryption\_enabled) | Whether node to node encryption is enabled. 
| `bool` | `true` | no | +| [off\_peak\_window\_enabled](#input\_off\_peak\_window\_enabled) | Whether to enable off peak update | `bool` | `true` | no | +| [security\_group\_ids](#input\_security\_group\_ids) | Additional security groups used by the domain. | `list(string)` | `[]` | no | +| [subnet\_ids](#input\_subnet\_ids) | The subnet IDs to create the cluster in. For easier usage we are passing through the subnet IDs from the AWS EKS Cluster module. | `list(string)` | n/a | yes | +| [tags](#input\_tags) | Tags assigned to the domain. | `map(string)` | `{}` | no | +| [vpc\_id](#input\_vpc\_id) | VPC used by the domain. | `string` | n/a | yes | +| [warm\_count](#input\_warm\_count) | Number of warm nodes in the cluster. | `number` | `2` | no | +| [warm\_enabled](#input\_warm\_enabled) | Warm storage is enabled. | `bool` | `false` | no | +| [warm\_type](#input\_warm\_type) | Instance type for the OpenSearch cluster's warm nodes. | `string` | `"ultrawarm1.medium.search"` | no | +| [zone\_awareness\_availability\_zone\_count](#input\_zone\_awareness\_availability\_zone\_count) | Number of availability zones used. | `number` | `3` | no | +| [zone\_awareness\_enabled](#input\_zone\_awareness\_enabled) | Indicates whether zone awareness is enabled. | `bool` | `true` | no | +## Outputs + +| Name | Description | +|------|-------------| +| [kms\_key\_arn](#output\_kms\_key\_arn) | The ARN of the KMS key used to encrypt the OpenSearch domain | +| [kms\_key\_id](#output\_kms\_key\_id) | The ID of the KMS key used for OpenSearch domain encryption | +| [opensearch\_cluster](#output\_opensearch\_cluster) | OpenSearch cluster output | +| [opensearch\_domain\_arn](#output\_opensearch\_domain\_arn) | The ARN of the OpenSearch domain | +| [opensearch\_domain\_endpoint](#output\_opensearch\_domain\_endpoint) | The endpoint of the OpenSearch domain | +| [opensearch\_domain\_id](#output\_opensearch\_domain\_id) | The ID of the OpenSearch domain | +| [opensearch\_policy\_arn](#output\_opensearch\_policy\_arn) | The ARN of the OpenSearch access policy | +| [opensearch\_role\_arn](#output\_opensearch\_role\_arn) | The ARN of the OpenSearch IAM role | +| [opensearch\_role\_name](#output\_opensearch\_role\_name) | The name of the OpenSearch IAM role | +| [security\_group\_id](#output\_security\_group\_id) | The ID of the security group used by OpenSearch | +| [security\_group\_rule\_egress](#output\_security\_group\_rule\_egress) | Egress rule information for OpenSearch security group | +| [security\_group\_rule\_ingress](#output\_security\_group\_rule\_ingress) | Ingress rule information for OpenSearch security group | + diff --git a/modules/opensearch/backup.tf b/modules/opensearch/backup.tf new file mode 100644 index 00000000..7856a5df --- /dev/null +++ b/modules/opensearch/backup.tf @@ -0,0 +1 @@ +# TODO: test backup diff --git a/modules/opensearch/main.tf b/modules/opensearch/main.tf new file mode 100644 index 00000000..c1de0203 --- /dev/null +++ b/modules/opensearch/main.tf @@ -0,0 +1,105 @@ +resource "aws_opensearch_domain" "opensearch_cluster" { + tags = var.tags + + domain_name = var.domain_name + engine_version = "OpenSearch_${var.engine_version}" + + ip_address_type = var.ip_address_type + + vpc_options { + subnet_ids = var.subnet_ids + security_group_ids = concat([aws_security_group.this.id], var.security_group_ids) + } + + off_peak_window_options { + enabled = var.off_peak_window_enabled + } + + # TODO: integrate logwatch in this component but also in the other for production ready solution + + 
cluster_config { + instance_type = var.instance_type + instance_count = var.instance_count + + cold_storage_options { + enabled = var.cold_storage_enabled + } + + dedicated_master_enabled = var.dedicated_master_enabled + dedicated_master_type = var.dedicated_master_type + dedicated_master_count = var.dedicated_master_count + multi_az_with_standby_enabled = var.multi_az_with_standby_enabled + + warm_enabled = var.warm_enabled + warm_count = var.warm_enabled ? var.warm_count : null + warm_type = var.warm_enabled ? var.warm_type : null + + zone_awareness_config { + availability_zone_count = var.zone_awareness_availability_zone_count + } + zone_awareness_enabled = var.zone_awareness_enabled + } + + software_update_options { + auto_software_update_enabled = var.auto_software_update_enabled + } + + advanced_security_options { + enabled = var.advanced_security_enabled + internal_user_database_enabled = var.advanced_security_internal_user_database_enabled + + master_user_options { + master_user_name = var.advanced_security_master_user_name + master_user_password = var.advanced_security_master_user_password + } + + anonymous_auth_enabled = var.advanced_security_anonymous_auth_enabled + } + + encrypt_at_rest { + enabled = true + kms_key_id = aws_kms_key.kms.key_id + } + + node_to_node_encryption { + enabled = var.node_to_node_encryption_enabled + } + + ebs_options { + ebs_enabled = var.ebs_enabled + iops = var.ebs_iops + volume_size = var.ebs_volume_size + volume_type = var.ebs_volume_type + throughput = var.ebs_throughput + } + + snapshot_options { + automated_snapshot_start_hour = var.automated_snapshot_start_hour + } + + auto_tune_options { + desired_state = var.auto_tune_options.desired_state + rollback_on_disable = var.auto_tune_options.rollback_on_disable + } + + advanced_options = var.advanced_options + + access_policies = var.enable_access_policy ? 
var.access_policies : null + + domain_endpoint_options { + enforce_https = var.domain_endpoint_options.enforce_https + tls_security_policy = var.domain_endpoint_options.tls_security_policy + } + + timeouts { + create = var.create_timeout + } +} + +resource "aws_kms_key" "kms" { + description = "${var.domain_name}-key" + deletion_window_in_days = var.kms_key_delete_window_in_days + enable_key_rotation = var.kms_key_enable_key_rotation + + tags = var.kms_key_tags +} diff --git a/modules/opensearch/monitoring.tf b/modules/opensearch/monitoring.tf new file mode 100644 index 00000000..912d2f82 --- /dev/null +++ b/modules/opensearch/monitoring.tf @@ -0,0 +1 @@ +# TODO: add monitoring diff --git a/modules/opensearch/networking.tf b/modules/opensearch/networking.tf new file mode 100644 index 00000000..940044e7 --- /dev/null +++ b/modules/opensearch/networking.tf @@ -0,0 +1,32 @@ +resource "aws_security_group" "this" { + name = "${var.domain_name}-allow-all-internal-access" + description = "Security group managing access to ${var.domain_name}" + + vpc_id = var.vpc_id + + tags = var.tags +} + +resource "aws_security_group_rule" "allow_egress" { + description = "Allow outgoing traffic for the OpenSearch" + + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = var.cidr_blocks + + security_group_id = aws_security_group.this.id +} + +resource "aws_security_group_rule" "allow_ingress" { + description = "Allow incoming traffic for the OpenSearch on port 443" + + type = "ingress" + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = var.cidr_blocks + + security_group_id = aws_security_group.this.id +} diff --git a/modules/opensearch/outputs.tf b/modules/opensearch/outputs.tf new file mode 100644 index 00000000..a7a295d7 --- /dev/null +++ b/modules/opensearch/outputs.tf @@ -0,0 +1,72 @@ + +output "opensearch_cluster" { + value = aws_opensearch_domain.opensearch_cluster + description = "OpenSearch cluster output" + sensitive = true +} + +output "opensearch_domain_endpoint" { + description = "The endpoint of the OpenSearch domain" + value = aws_opensearch_domain.opensearch_cluster.endpoint + sensitive = false +} + +output "opensearch_domain_arn" { + description = "The ARN of the OpenSearch domain" + value = aws_opensearch_domain.opensearch_cluster.arn + sensitive = false +} + +output "opensearch_domain_id" { + description = "The ID of the OpenSearch domain" + value = aws_opensearch_domain.opensearch_cluster.domain_id + sensitive = false +} + +output "kms_key_arn" { + description = "The ARN of the KMS key used to encrypt the OpenSearch domain" + value = aws_kms_key.kms.arn + sensitive = false +} + +output "kms_key_id" { + description = "The ID of the KMS key used for OpenSearch domain encryption" + value = aws_kms_key.kms.key_id + sensitive = false +} + +output "security_group_id" { + description = "The ID of the security group used by OpenSearch" + value = aws_security_group.this.id + sensitive = false +} + +output "security_group_rule_ingress" { + description = "Ingress rule information for OpenSearch security group" + value = aws_security_group_rule.allow_ingress + sensitive = false +} + +output "security_group_rule_egress" { + description = "Egress rule information for OpenSearch security group" + value = aws_security_group_rule.allow_egress + sensitive = false +} + +output "opensearch_role_name" { + description = "The name of the OpenSearch IAM role" + value = var.iam_create_opensearch_role ? 
aws_iam_role.opensearch[0].name : ""
+  sensitive   = false
+}
+
+output "opensearch_role_arn" {
+  description = "The ARN of the OpenSearch IAM role"
+  value       = var.iam_create_opensearch_role ? aws_iam_role.opensearch[0].arn : ""
+  sensitive   = false
+}
+
+output "opensearch_policy_arn" {
+  description = "The ARN of the OpenSearch access policy"
+  value       = var.iam_create_opensearch_role ? aws_iam_policy.opensearch_access_policy[0].arn : ""
+  sensitive   = false
+}
diff --git a/modules/opensearch/role.tf b/modules/opensearch/role.tf
new file mode 100644
index 00000000..be12de8c
--- /dev/null
+++ b/modules/opensearch/role.tf
@@ -0,0 +1,25 @@
+// IAM Role for OpenSearch
+resource "aws_iam_role" "opensearch" {
+  count = var.iam_create_opensearch_role ? 1 : 0
+
+  name               = var.iam_opensearch_role_name
+  assume_role_policy = var.iam_role_trust_policy
+}
+
+// IAM Policy for OpenSearch Access
+resource "aws_iam_policy" "opensearch_access_policy" {
+  count = var.iam_create_opensearch_role ? 1 : 0
+
+  name        = "${var.iam_opensearch_role_name}-access-policy"
+  description = "Access policy for OpenSearch"
+
+  policy = var.iam_opensearch_access_policy
+}
+
+// Attach the policy to the role
+resource "aws_iam_role_policy_attachment" "attach_opensearch_policy" {
+  count = var.iam_create_opensearch_role ? 1 : 0
+
+  role       = aws_iam_role.opensearch[0].name
+  policy_arn = aws_iam_policy.opensearch_access_policy[0].arn
+}
diff --git a/modules/opensearch/variables.tf b/modules/opensearch/variables.tf
new file mode 100644
index 00000000..66ac4999
--- /dev/null
+++ b/modules/opensearch/variables.tf
@@ -0,0 +1,317 @@
+# ! Developer: if you are adding a variable without a default value, please ensure to reference it in the cleanup script (.github/actions/eks-cleanup-resources/scripts/destroy.sh)
+
+variable "domain_name" {
+  type        = string
+  description = "Name of the domain."
+}
+
+variable "engine_version" {
+  type        = string
+  description = "OpenSearch version for the domain."
+  # TODO: add renovate
+  default     = "2.15"
+}
+
+variable "vpc_id" {
+  type        = string
+  description = "VPC used by the domain."
+}
+
+variable "subnet_ids" {
+  type        = list(string)
+  description = "The subnet IDs to create the cluster in. For easier usage we are passing through the subnet IDs from the AWS EKS Cluster module."
+}
+
+variable "cidr_blocks" {
+  type        = list(string)
+  description = "The CIDR blocks to allow access from and to."
+}
+
+variable "security_group_ids" {
+  type        = list(string)
+  description = "Additional security groups used by the domain."
+  default     = []
+}
+
+variable "instance_type" {
+  type        = string
+  default     = "t3.small.search"
+  description = "Instance type of data nodes in the cluster."
+}
+
+variable "instance_count" {
+  type        = number
+  default     = 3
+  description = "Number of instances in the cluster."
+}
+
+variable "cold_storage_enabled" {
+  type        = bool
+  default     = false
+  description = "Indicates cold storage is enabled."
+}
+
+variable "dedicated_master_enabled" {
+  type        = bool
+  description = "Indicates whether dedicated master nodes are enabled for the cluster."
+  default     = true
+}
+
+variable "dedicated_master_type" {
+  type        = string
+  description = "Instance type of the dedicated master nodes in the cluster."
+  default     = "m4.large.search"
+}
+
+variable "dedicated_master_count" {
+  type        = number
+  description = "Number of dedicated master nodes in the cluster."
+  default     = 3
+}
+
+variable "multi_az_with_standby_enabled" {
+  type        = bool
+  description = "Whether a multi-AZ domain is turned on with a standby AZ."
+ default = false +} + +variable "zone_awareness_enabled" { + type = bool + description = "Indicates whether zone awareness is enabled." + default = true +} + +variable "zone_awareness_availability_zone_count" { + type = number + description = "Number of availability zones used." + default = 3 +} + +variable "warm_enabled" { + type = bool + description = "Warm storage is enabled." + default = false +} + +variable "warm_count" { + type = number + description = "Number of warm nodes in the cluster." + default = 2 +} + +variable "warm_type" { + type = string + description = "Instance type for the OpenSearch cluster's warm nodes." + default = "ultrawarm1.medium.search" +} + +variable "tags" { + type = map(string) + default = {} + description = "Tags assigned to the domain." +} + +variable "auto_software_update_enabled" { + type = bool + default = false + description = "Software update auto for the domain." +} + +variable "automated_snapshot_start_hour" { + type = number + default = 0 + description = "Hour during which the service takes an automated daily snapshot of the indices in the domain." +} + +variable "node_to_node_encryption_enabled" { + type = bool + default = true + description = "Whether node to node encryption is enabled." +} + +variable "advanced_options" { + type = map(any) + default = { + "rest.action.multi.allow_explicit_index" = true + } + description = "Key-value string pairs to specify advanced configuration options." +} + +variable "advanced_security_enabled" { + type = bool + default = false + description = "Whether advanced security is enabled." +} + +variable "advanced_security_internal_user_database_enabled" { + type = bool + default = false + description = "Whether the internal user database is enabled." +} + +variable "advanced_security_master_user_name" { + type = string + default = "opensearch-admin" + description = "Main user's username, which is stored in the Amazon Elasticsearch Service domain's internal database. Only specify if `advanced_security_internal_user_database_enabled` is set to true." +} + +variable "advanced_security_master_user_password" { + type = string + default = "" + description = "Main user's password, which is stored in the Amazon Elasticsearch Service domain's internal database. Only specify if `advanced_security_internal_user_database_enabled` is set to true." +} + +variable "advanced_security_anonymous_auth_enabled" { + type = bool + description = "Whether the anonymous auth is enabled." + default = false +} + +variable "access_policies" { + type = string + default = "{}" + description = "IAM policy document specifying the access policies for the domain." +} + +variable "create_timeout" { + type = string + description = "How much time to wait for the creation before timing out." + default = "2h" +} + +variable "ebs_enabled" { + type = bool + description = "Whether EBS volumes are attached to data nodes in the domain." + default = true +} + +variable "ebs_iops" { + type = number + default = 3000 + description = "Baseline input/output (I/O) performance of EBS volumes attached to data nodes. Applicable only for the GP3 and Provisioned IOPS EBS volume types." +} + +variable "ebs_throughput" { + type = number + default = 125 + description = "(Required if `ebs_volume_type` is set to gp3) Specifies the throughput (in MiB/s) of the EBS volumes attached to data nodes. Applicable only for the gp3 volume type." +} + +variable "ebs_volume_type" { + type = string + default = "gp3" + description = "Type of EBS volumes attached to data nodes." 
+} + +variable "ebs_volume_size" { + type = number + description = "Size of EBS volumes attached to data nodes." + default = 64 +} + +variable "enable_access_policy" { + type = bool + default = true + description = "Determines whether an access policy will be applied to the domain" +} + +variable "auto_tune_options" { + type = any + description = "Configuration block for the Auto-Tune options of the domain" + default = { "desired_state" : "DISABLED", "rollback_on_disable" : "NO_ROLLBACK" } +} + +variable "domain_endpoint_options" { + type = any + description = "Configuration block for domain endpoint HTTP(S) related options" + default = { "enforce_https" : true, "tls_security_policy" : "Policy-Min-TLS-1-2-2019-07" } +} + +variable "ip_address_type" { + type = string + default = "ipv4" + description = "The IP address type for the endpoint. Valid values are ipv4 and dualstack" +} + +variable "off_peak_window_enabled" { + type = bool + default = true + description = "Whether to enable off peak update" +} + +variable "kms_key_delete_window_in_days" { + type = number + description = "The number of days before the KMS key is deleted after being disabled." + default = 7 +} + +variable "kms_key_enable_key_rotation" { + type = bool + description = "Specifies whether automatic key rotation is enabled for the KMS key." + default = true +} + +variable "kms_key_tags" { + type = map(string) + description = "The tags to associate with the KMS key." + default = {} +} + +variable "iam_create_opensearch_role" { + description = "Flag to determine if the OpenSearch role should be created" + type = bool + default = false +} + +variable "iam_opensearch_role_name" { + description = "Name of the OpenSearch IAM role" + type = string + default = "OpenSearchRole" +} + +variable "iam_role_trust_policy" { + description = "Assume role trust policy for OpenSearch role" + type = string + default = <:oidc-provider/oidc.eks..amazonaws.com/id/" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "oidc.eks..amazonaws.com/id/:sub": "system:serviceaccount::" + } + } + } + ] + } + +EOF +} + +variable "iam_opensearch_access_policy" { + description = "Access policy for OpenSearch allowing access" + type = string + default = <::domain//*" + } + ] + } + +EOF +} diff --git a/test/src/custom_eks_opensearch_test.go b/test/src/custom_eks_opensearch_test.go new file mode 100644 index 00000000..386de6e4 --- /dev/null +++ b/test/src/custom_eks_opensearch_test.go @@ -0,0 +1,330 @@ +package test + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/iam" + "github.com/aws/aws-sdk-go-v2/service/opensearch" + "github.com/aws/aws-sdk-go-v2/service/opensearch/types" + "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/camunda/camunda-tf-eks-module/utils" + "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/gruntwork-io/terratest/modules/random" + "github.com/gruntwork-io/terratest/modules/terraform" + test_structure "github.com/gruntwork-io/terratest/modules/test-structure" + "github.com/stretchr/testify/suite" + "go.uber.org/zap" + "go.uber.org/zap/zaptest" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "os" + "path/filepath" + "strings" + "testing" + "time" +) + +type CustomEKSOpenSearchTestSuite struct { + suite.Suite + logger *zap.Logger + sugaredLogger *zap.SugaredLogger + clusterName string + expectedNodes int + 
diff --git a/test/src/custom_eks_opensearch_test.go b/test/src/custom_eks_opensearch_test.go
new file mode 100644
index 00000000..386de6e4
--- /dev/null
+++ b/test/src/custom_eks_opensearch_test.go
@@ -0,0 +1,330 @@
+package test
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/eks"
+	"github.com/aws/aws-sdk-go-v2/service/iam"
+	"github.com/aws/aws-sdk-go-v2/service/opensearch"
+	"github.com/aws/aws-sdk-go-v2/service/opensearch/types"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+	"github.com/camunda/camunda-tf-eks-module/utils"
+	"github.com/gruntwork-io/terratest/modules/k8s"
+	"github.com/gruntwork-io/terratest/modules/random"
+	"github.com/gruntwork-io/terratest/modules/terraform"
+	test_structure "github.com/gruntwork-io/terratest/modules/test-structure"
+	"github.com/stretchr/testify/suite"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zaptest"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+)
+
+type CustomEKSOpenSearchTestSuite struct {
+	suite.Suite
+	logger          *zap.Logger
+	sugaredLogger   *zap.SugaredLogger
+	clusterName     string
+	expectedNodes   int
+	kubeConfigPath  string
+	region          string
+	bucketRegion    string
+	tfDataDir       string
+	tfBinaryName    string
+	varTf           map[string]interface{}
+	tfStateS3Bucket string
+}
+
+func (suite *CustomEKSOpenSearchTestSuite) SetupTest() {
+	suite.logger = zaptest.NewLogger(suite.T())
+	suite.sugaredLogger = suite.logger.Sugar()
+
+	clusterSuffix := utils.GetEnv("TESTS_CLUSTER_ID", strings.ToLower(random.UniqueId()))
+	suite.clusterName = fmt.Sprintf("cl-os-%s", clusterSuffix)
+	suite.region = utils.GetEnv("TESTS_CLUSTER_REGION", "eu-central-1")
+	suite.bucketRegion = utils.GetEnv("TF_STATE_BUCKET_REGION", suite.region)
+	suite.tfBinaryName = utils.GetEnv("TESTS_TF_BINARY_NAME", "terraform")
+	suite.sugaredLogger.Infow("Terraform binary for the suite", "binary", suite.tfBinaryName)
+
+	suite.expectedNodes = 1
+	var errAbsPath error
+	suite.tfStateS3Bucket = utils.GetEnv("TF_STATE_BUCKET", fmt.Sprintf("tests-eks-tf-state-%s", suite.bucketRegion))
+	suite.tfDataDir, errAbsPath = filepath.Abs(fmt.Sprintf("../../test/states/tf-data-%s", suite.clusterName))
+	suite.Require().NoError(errAbsPath)
+	suite.kubeConfigPath = fmt.Sprintf("%s/kubeconfig-opensearch-eks", suite.tfDataDir)
+}
+
+func (suite *CustomEKSOpenSearchTestSuite) TearUpTest() {
+	// create the tf state directory
+	absPath, err := filepath.Abs(suite.tfDataDir)
+	suite.Require().NoError(err)
+	err = os.MkdirAll(absPath, os.ModePerm)
+	suite.Require().NoError(err)
+}
+
+func (suite *CustomEKSOpenSearchTestSuite) TearDownTest() {
+	suite.T().Log("Cleaning up resources...")
+
+	err := os.Remove(suite.kubeConfigPath)
+	if err != nil && !os.IsNotExist(err) {
+		suite.T().Errorf("Failed to remove kubeConfigPath: %v", err)
+	}
+}
+
+// TestCustomEKSAndOpenSearch spawns a custom EKS cluster with custom parameters, and spawns
+// a curl pod that will try to reach the OpenSearch cluster
+func (suite *CustomEKSOpenSearchTestSuite) TestCustomEKSAndOpenSearch() {
+	suite.varTf = map[string]interface{}{
+		"name":                  suite.clusterName,
+		"region":                suite.region,
+		"np_desired_node_count": suite.expectedNodes,
+	}
+
+	suite.sugaredLogger.Infow("Creating EKS cluster...", "extraVars", suite.varTf)
+
+	tfModuleEKS := "eks-cluster/"
+	fullDirEKS := fmt.Sprintf("%s/%s", suite.tfDataDir, tfModuleEKS)
+	errTfDirEKS := os.MkdirAll(fullDirEKS, os.ModePerm)
+	suite.Require().NoError(errTfDirEKS)
+	tfDir := test_structure.CopyTerraformFolderToDest(suite.T(), "../../modules/", tfModuleEKS, fullDirEKS)
+
+	errLinkBackend := os.Link("../../modules/fixtures/backend.tf", filepath.Join(tfDir, "backend.tf"))
+	suite.Require().NoError(errLinkBackend)
+
+	terraformOptions := &terraform.Options{
+		TerraformBinary: suite.tfBinaryName,
+		TerraformDir:    tfDir,
+		Upgrade:         false,
+		VarFiles:        []string{"../fixtures/fixtures.default.eks.tfvars"},
+		Vars:            suite.varTf,
+		BackendConfig: map[string]interface{}{
+			"bucket": suite.tfStateS3Bucket,
+			"key":    fmt.Sprintf("terraform/%s/TestCustomEKSOpenSearchTestSuite/%sterraform.tfstate", suite.clusterName, tfModuleEKS),
+			"region": suite.bucketRegion,
+		},
+	}
+
+	// configure bucket backend
+	sessBackend, err := utils.GetAwsClientF(utils.GetAwsProfile(), suite.bucketRegion)
+	suite.Require().NoErrorf(err, "Failed to get aws client")
+	err = utils.CreateS3BucketIfNotExists(sessBackend, suite.tfStateS3Bucket, utils.TF_BUCKET_DESCRIPTION, suite.bucketRegion)
+	suite.Require().NoErrorf(err, "Failed to create s3 state bucket")
+
+	cleanClusterAtTheEnd := utils.GetEnv("CLEAN_CLUSTER_AT_THE_END", "true")
+	if cleanClusterAtTheEnd == "true" {
+		defer utils.DeferCleanup(suite.T(),
suite.bucketRegion, terraformOptions) + } + + // due to output of the creation changing tags from null to {}, we can't pass the + // idempotency test + terraform.InitAndApply(suite.T(), terraformOptions) + + sess, err := utils.GetAwsClient() + suite.Require().NoErrorf(err, "Failed to get aws client") + + eksSvc := eks.NewFromConfig(sess) + openSearchSvc := opensearch.NewFromConfig(sess) + stsSvc := sts.NewFromConfig(sess) + iamSvc := iam.NewFromConfig(sess) + + inputEKS := &eks.DescribeClusterInput{ + Name: aws.String(suite.clusterName), + } + + result, err := eksSvc.DescribeCluster(context.Background(), inputEKS) + suite.sugaredLogger.Infow("eks describe cluster result", "result", result, "err", err) + suite.Assert().NoError(err) + + utils.GenerateKubeConfigFromAWS(suite.T(), suite.region, suite.clusterName, utils.GetAwsProfile(), suite.kubeConfigPath) + + // Spawn OpenSearch within the EKS VPC/subnet + publicBlocks := strings.Fields(strings.Trim(terraform.Output(suite.T(), terraformOptions, "public_vpc_cidr_blocks"), "[]")) + privateBlocks := strings.Fields(strings.Trim(terraform.Output(suite.T(), terraformOptions, "private_vpc_cidr_blocks"), "[]")) + + opensearchDomainName := fmt.Sprintf("os-%s", suite.clusterName) + + // Extract OIDC issuer and create the IRSA role with RDS OpenSearch access + oidcProviderID, errorOIDC := utils.ExtractOIDCProviderID(result) + suite.Require().NoError(errorOIDC) + suite.Assert().NotEmpty(terraform.Output(suite.T(), terraformOptions, "oidc_provider_id")) + suite.Require().Equal(oidcProviderID, terraform.Output(suite.T(), terraformOptions, "oidc_provider_id")) + + stsIdentity, err := stsSvc.GetCallerIdentity(context.TODO(), &sts.GetCallerIdentityInput{}) + suite.Require().NoError(err, "Failed to get AWS account ID") + accountId := *stsIdentity.Account + suite.Assert().NotEmpty(terraform.Output(suite.T(), terraformOptions, "aws_caller_identity_account_id")) + suite.Require().Equal(accountId, terraform.Output(suite.T(), terraformOptions, "aws_caller_identity_account_id")) + + openSearchArn := fmt.Sprintf("arn:aws:es:%s:%s:domain/%s/*", suite.region, accountId, opensearchDomainName) + suite.sugaredLogger.Infow("OpenSearch infos", "accountId", accountId, "openSearchArn", openSearchArn) + + // Create namespace and associated service account in EKS + openSearchNamespace := "opensearch" + openSearchServiceAccount := "opensearch-access-sa" + openSearchRole := fmt.Sprintf("OpenSearchRole-%s", suite.clusterName) + openSearchKubectlOptions := k8s.NewKubectlOptions("", suite.kubeConfigPath, openSearchNamespace) + utils.CreateIfNotExistsNamespace(suite.T(), openSearchKubectlOptions, openSearchNamespace) + utils.CreateIfNotExistsServiceAccount(suite.T(), openSearchKubectlOptions, openSearchServiceAccount, map[string]string{ + "eks.amazonaws.com/role-arn": fmt.Sprintf("arn:aws:iam::%s:role/%s", accountId, openSearchRole), + }) + + openSearchAccessPolicy := fmt.Sprintf(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "es:ESHttpGet", + "es:ESHttpPut", + "es:ESHttpPost" + ], + "Resource": "arn:aws:es:%s:%s:domain/%s/*" + } + ] +}`, suite.region, accountId, opensearchDomainName) + + iamRoleTrustPolicy := fmt.Sprintf(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::%s:oidc-provider/%s" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "%s:sub": "system:serviceaccount:%s:%s" + } + } + } + ] +}`, accountId, oidcProviderID, 
oidcProviderID, openSearchNamespace, openSearchServiceAccount) + + varsConfigOpenSearch := map[string]interface{}{ + "domain_name": opensearchDomainName, + "subnet_ids": result.Cluster.ResourcesVpcConfig.SubnetIds, + "cidr_blocks": append(publicBlocks, privateBlocks...), + "vpc_id": *result.Cluster.ResourcesVpcConfig.VpcId, + "iam_create_opensearch_role": true, + "iam_opensearch_role_name": openSearchRole, + "iam_role_trust_policy": iamRoleTrustPolicy, + "iam_opensearch_access_policy": openSearchAccessPolicy, + } + + tfModuleOpenSearch := "opensearch/" + fullDirOpenSearch := fmt.Sprintf("%s/%s", suite.tfDataDir, tfModuleOpenSearch) + errTfDirOpenSearch := os.MkdirAll(fullDirOpenSearch, os.ModePerm) + suite.Require().NoError(errTfDirOpenSearch) + + tfDirOpenSearch := test_structure.CopyTerraformFolderToDest(suite.T(), "../../modules/", tfModuleOpenSearch, fullDirOpenSearch) + + errLinkBackend = os.Link("../../modules/fixtures/backend.tf", filepath.Join(tfDirOpenSearch, "backend.tf")) + suite.Require().NoError(errLinkBackend) + + terraformOptionsOpenSearch := &terraform.Options{ + TerraformBinary: suite.tfBinaryName, + TerraformDir: tfDirOpenSearch, + Upgrade: false, + VarFiles: []string{"../fixtures/fixtures.default.opensearch.tfvars"}, + Vars: varsConfigOpenSearch, + BackendConfig: map[string]interface{}{ + "bucket": suite.tfStateS3Bucket, + "key": fmt.Sprintf("terraform/%s/TestCustomEKSOpenSearchTestSuite/%sterraform.tfstate", suite.clusterName, tfModuleOpenSearch), + "region": suite.bucketRegion, + }, + } + + if cleanClusterAtTheEnd == "true" { + defer utils.DeferCleanup(suite.T(), suite.bucketRegion, terraformOptionsOpenSearch) + } + + terraform.InitAndApplyAndIdempotent(suite.T(), terraformOptionsOpenSearch) + opensearchEndpoint := terraform.Output(suite.T(), terraformOptionsOpenSearch, "opensearch_domain_endpoint") + suite.Assert().NotEmpty(opensearchEndpoint) + + // Test the OpenSearch connection and perform additional tests as needed + + // Retrieve OpenSearch information + describeDomainInput := &opensearch.DescribeDomainInput{ + DomainName: aws.String(varsConfigOpenSearch["domain_name"].(string)), + } + describeOpenSearchDomainOutput, err := openSearchSvc.DescribeDomain(context.Background(), describeDomainInput) + suite.Require().NoError(err) + suite.sugaredLogger.Infow("Domain info", "domain", describeOpenSearchDomainOutput) + + suite.sugaredLogger.Infow("DescribeDomain info", "domain", describeOpenSearchDomainOutput.DomainStatus.EngineVersion) + + // Perform assertions on the OpenSearch domain configuration + suite.Assert().Equal(varsConfigOpenSearch["domain_name"].(string), *describeOpenSearchDomainOutput.DomainStatus.DomainName) + suite.Assert().Equal(int32(4), *describeOpenSearchDomainOutput.DomainStatus.ClusterConfig.InstanceCount) + suite.Assert().Equal(types.OpenSearchPartitionInstanceType("t3.small.search"), describeOpenSearchDomainOutput.DomainStatus.ClusterConfig.InstanceType) + suite.Assert().Equal(varsConfigOpenSearch["vpc_id"].(string), *describeOpenSearchDomainOutput.DomainStatus.VPCOptions.VPCId) + + // Verify security group information + suite.Assert().NotEmpty(describeOpenSearchDomainOutput.DomainStatus.VPCOptions.SecurityGroupIds) + + // Retrieve the IAM Role associated with OpenSearch + describeOpenSearchRoleInput := &iam.GetRoleInput{ + RoleName: aws.String(varsConfigOpenSearch["iam_opensearch_role_name"].(string)), + } + _, err = iamSvc.GetRole(context.Background(), describeOpenSearchRoleInput) + suite.Require().NoError(err) + + // Verify IAM Policy 
Attachment + listAttachedPoliciesInput := &iam.ListAttachedRolePoliciesInput{ + RoleName: aws.String(varsConfigOpenSearch["iam_opensearch_role_name"].(string)), + } + _, err = iamSvc.ListAttachedRolePolicies(context.Background(), listAttachedPoliciesInput) + suite.Require().NoError(err) + + // Test the OpenSearch connection and perform additional tests as needed + suite.Assert().NotEmpty(opensearchEndpoint) + configMapScript := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "opensearch-config", + Namespace: openSearchNamespace, + }, + Data: map[string]string{ + "opensearch_endpoint": opensearchEndpoint, + "aws_region": suite.region, + }, + } + + // spawn a kubeclient + kubeClient, errKubeClient := utils.NewKubeClientSet(result.Cluster) + suite.Require().NoError(errKubeClient) + + err = kubeClient.CoreV1().ConfigMaps(openSearchNamespace).Delete(context.Background(), configMapScript.Name, metav1.DeleteOptions{}) + if err != nil && !errors.IsNotFound(err) { + suite.Require().NoError(err) + } + _, err = kubeClient.CoreV1().ConfigMaps(openSearchNamespace).Create(context.Background(), configMapScript, metav1.CreateOptions{}) + k8s.WaitUntilConfigMapAvailable(suite.T(), openSearchKubectlOptions, configMapScript.Name, 6, 10*time.Second) + + // cleanup existing jobs + jobListOptions := metav1.ListOptions{LabelSelector: "app=opensearch-client"} + existingJobs := k8s.ListJobs(suite.T(), openSearchKubectlOptions, jobListOptions) + backgroundDeletion := metav1.DeletePropagationBackground + for _, job := range existingJobs { + err := kubeClient.BatchV1().Jobs(openSearchNamespace).Delete(context.Background(), job.Name, metav1.DeleteOptions{PropagationPolicy: &backgroundDeletion}) + suite.Assert().NoError(err) + } + + // deploy the opensearch-client Job to test the connection + k8s.KubectlApply(suite.T(), openSearchKubectlOptions, "../../modules/fixtures/opensearch-client.yml") + errJob := utils.WaitForJobCompletion(kubeClient, openSearchNamespace, "opensearch-client", 5*time.Minute, jobListOptions) + suite.Require().NoError(errJob) +} + +func TestCustomEKSOpenSearchTestSuite(t *testing.T) { + t.Parallel() + suite.Run(t, new(CustomEKSOpenSearchTestSuite)) +} diff --git a/test/src/custom_eks_rds_test.go b/test/src/custom_eks_rds_test.go index 94a7c155..28dd8dff 100644 --- a/test/src/custom_eks_rds_test.go +++ b/test/src/custom_eks_rds_test.go @@ -6,11 +6,13 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/camunda/camunda-tf-eks-module/utils" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" "github.com/gruntwork-io/terratest/modules/terraform" test_structure "github.com/gruntwork-io/terratest/modules/test-structure" + "github.com/sethvargo/go-password/password" "github.com/stretchr/testify/suite" "go.uber.org/zap" "go.uber.org/zap/zaptest" @@ -119,7 +121,8 @@ func (suite *CustomEKSRDSTestSuite) TestCustomEKSAndRDS() { defer utils.DeferCleanup(suite.T(), suite.bucketRegion, terraformOptions) } - // since v20, we can't use InitAndApplyAndIdempotent due to labels being added + // due to output of the creation changing tags from null to {}, we can't pass the + // idempotency test terraform.InitAndApply(suite.T(), terraformOptions) sess, err := utils.GetAwsClient() @@ -128,6 +131,7 @@ func (suite *CustomEKSRDSTestSuite) TestCustomEKSAndRDS() { // list your services here eksSvc := eks.NewFromConfig(sess) rdsSvc 
:= rds.NewFromConfig(sess) + stsSvc := sts.NewFromConfig(sess) inputEKS := &eks.DescribeClusterInput{ Name: aws.String(suite.clusterName), @@ -144,20 +148,84 @@ func (suite *CustomEKSRDSTestSuite) TestCustomEKSAndRDS() { publicBlocks := strings.Fields(strings.Trim(terraform.Output(suite.T(), terraformOptions, "public_vpc_cidr_blocks"), "[]")) privateBlocks := strings.Fields(strings.Trim(terraform.Output(suite.T(), terraformOptions, "private_vpc_cidr_blocks"), "[]")) - auroraUsername := "myuser" - auroraPassword := "mypassword123secure" + // Extract OIDC issuer and create the IRSA role with RDS Aurora access + oidcProviderID, errorOIDC := utils.ExtractOIDCProviderID(result) + suite.Require().NoError(errorOIDC) + + stsIdentity, err := stsSvc.GetCallerIdentity(context.TODO(), &sts.GetCallerIdentityInput{}) + suite.Require().NoError(err, "Failed to get AWS account ID") + + accountId := *stsIdentity.Account + auroraClusterName := fmt.Sprintf("postgres-%s", suite.clusterName) + auroraUsername := "adminuser" + auroraPassword, errPassword := password.Generate(18, 4, 0, false, false) + suite.Require().NoError(errPassword) auroraDatabase := "camunda" + // Define the ARN for RDS IAM DB Auth + auroraIRSAUsername := "myirsauser" + auroraArn := fmt.Sprintf("arn:aws:rds-db:%s:%s:dbuser:%s/%s", suite.region, accountId, auroraClusterName, auroraIRSAUsername) + suite.sugaredLogger.Infow("Aurora RDS IAM infos", "accountId", accountId, "auroraArn", auroraArn) + + utils.GenerateKubeConfigFromAWS(suite.T(), suite.region, suite.clusterName, utils.GetAwsProfile(), suite.kubeConfigPath) + + // Create namespace and associated service account in EKS + auroraNamespace := "aurora" + auroraServiceAccount := "aurora-access-sa" + auroraRole := fmt.Sprintf("AuroraRole-%s", suite.clusterName) + auroraKubectlOptions := k8s.NewKubectlOptions("", suite.kubeConfigPath, auroraNamespace) + utils.CreateIfNotExistsNamespace(suite.T(), auroraKubectlOptions, auroraNamespace) + utils.CreateIfNotExistsServiceAccount(suite.T(), auroraKubectlOptions, auroraServiceAccount, map[string]string{ + "eks.amazonaws.com/role-arn": fmt.Sprintf("arn:aws:iam::%s:role/%s", accountId, auroraRole), + }) + + // Define the Aurora access policy for IAM DB Auth + auroraAccessPolicy := fmt.Sprintf(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "rds-db:connect" + ], + "Resource": "arn:aws:rds-db:%s:%s:dbuser:%s/%s" + } + ] +}`, suite.region, accountId, auroraClusterName, auroraIRSAUsername) + + // Define the trust policy for Aurora IAM role + iamRoleTrustPolicy := fmt.Sprintf(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::%s:oidc-provider/%s" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "%s:sub": "system:serviceaccount:%s:%s" + } + } + } + ] +}`, accountId, oidcProviderID, oidcProviderID, auroraNamespace, auroraServiceAccount) + varsConfigAurora := map[string]interface{}{ - "username": auroraUsername, - "password": auroraPassword, - "default_database_name": auroraDatabase, - "cluster_name": fmt.Sprintf("postgres-%s", suite.clusterName), - "subnet_ids": result.Cluster.ResourcesVpcConfig.SubnetIds, - "vpc_id": *result.Cluster.ResourcesVpcConfig.VpcId, - "availability_zones": []string{fmt.Sprintf("%sa", suite.region), fmt.Sprintf("%sb", suite.region), fmt.Sprintf("%sc", suite.region)}, - "cidr_blocks": append(publicBlocks, privateBlocks...), - "iam_auth_enabled": true, + "username": auroraUsername, + 
"password": auroraPassword, + "default_database_name": auroraDatabase, + "cluster_name": auroraClusterName, + "subnet_ids": result.Cluster.ResourcesVpcConfig.SubnetIds, + "vpc_id": *result.Cluster.ResourcesVpcConfig.VpcId, + "availability_zones": []string{fmt.Sprintf("%sa", suite.region), fmt.Sprintf("%sb", suite.region), fmt.Sprintf("%sc", suite.region)}, + "cidr_blocks": append(publicBlocks, privateBlocks...), + "iam_auth_enabled": true, + "iam_create_aurora_role": true, + "iam_aurora_role_name": auroraRole, + "iam_role_trust_policy": iamRoleTrustPolicy, + "iam_aurora_access_policy": auroraAccessPolicy, } tfModuleAurora := "aurora/" @@ -187,97 +255,72 @@ func (suite *CustomEKSRDSTestSuite) TestCustomEKSAndRDS() { defer utils.DeferCleanup(suite.T(), suite.bucketRegion, terraformOptionsRDS) } - terraform.InitAndApply(suite.T(), terraformOptionsRDS) + terraform.InitAndApplyAndIdempotent(suite.T(), terraformOptionsRDS) auroraEndpoint := terraform.Output(suite.T(), terraformOptionsRDS, "aurora_endpoint") suite.Assert().NotEmpty(auroraEndpoint) // Test of the RDS connection is performed by launching a pod on the cluster and test the pg connection - kubeClient, err := utils.NewKubeClientSet(result.Cluster) - suite.Require().NoError(err) - - utils.GenerateKubeConfigFromAWS(suite.T(), suite.region, suite.clusterName, utils.GetAwsProfile(), suite.kubeConfigPath) - - namespace := "postgres-client" - pgKubeCtlOptions := k8s.NewKubectlOptions("", suite.kubeConfigPath, namespace) - utils.CreateIfNotExistsNamespace(suite.T(), pgKubeCtlOptions, namespace) + pgKubeCtlOptions := k8s.NewKubectlOptions("", suite.kubeConfigPath, auroraNamespace) // deploy the postgres-client ConfigMap configMapPostgres := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "aurora-config", - Namespace: namespace, + Namespace: auroraNamespace, }, Data: map[string]string{ "aurora_endpoint": auroraEndpoint, "aurora_username": auroraUsername, - "aurora_username_irsa": fmt.Sprintf("%s-irsa", auroraUsername), + "aurora_password": auroraPassword, + "aurora_username_irsa": auroraIRSAUsername, "aurora_port": "5432", "aws_region": suite.region, "aurora_db_name": auroraDatabase, }, } - err = kubeClient.CoreV1().ConfigMaps(namespace).Delete(context.Background(), configMapPostgres.Name, metav1.DeleteOptions{}) + // create a kubeclient + kubeClient, err := utils.NewKubeClientSet(result.Cluster) + suite.Require().NoError(err) + + err = kubeClient.CoreV1().ConfigMaps(auroraNamespace).Delete(context.Background(), configMapPostgres.Name, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { suite.Require().NoError(err) } - _, err = kubeClient.CoreV1().ConfigMaps(namespace).Create(context.Background(), configMapPostgres, metav1.CreateOptions{}) + _, err = kubeClient.CoreV1().ConfigMaps(auroraNamespace).Create(context.Background(), configMapPostgres, metav1.CreateOptions{}) k8s.WaitUntilConfigMapAvailable(suite.T(), pgKubeCtlOptions, configMapPostgres.Name, 6, 10*time.Second) // create the secret for aurora pg password secretPostgres := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "aurora-secret", - Namespace: namespace, + Namespace: auroraNamespace, }, StringData: map[string]string{ "aurora_password": auroraPassword, }, } - err = kubeClient.CoreV1().Secrets(namespace).Delete(context.Background(), configMapPostgres.Name, metav1.DeleteOptions{}) + err = kubeClient.CoreV1().Secrets(auroraNamespace).Delete(context.Background(), secretPostgres.Name, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { 
suite.Require().NoError(err) } - _, err = kubeClient.CoreV1().Secrets(namespace).Create(context.Background(), secretPostgres, metav1.CreateOptions{}) + _, err = kubeClient.CoreV1().Secrets(auroraNamespace).Create(context.Background(), secretPostgres, metav1.CreateOptions{}) k8s.WaitUntilSecretAvailable(suite.T(), pgKubeCtlOptions, secretPostgres.Name, 6, 10*time.Second) - // add the scripts as a ConfigMap - scriptPath := "../../modules/fixtures/scripts/create_aurora_pg_db.sh" - scriptContent, err := os.ReadFile(scriptPath) - suite.Require().NoError(err) - - configMapScript := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "postgres-scripts", - Namespace: namespace, - }, - Data: map[string]string{ - "create_aurora_pg_db.sh": string(scriptContent), - }, - } - - err = kubeClient.CoreV1().ConfigMaps(namespace).Delete(context.Background(), configMapScript.Name, metav1.DeleteOptions{}) - if err != nil && !errors.IsNotFound(err) { - suite.Require().NoError(err) - } - _, err = kubeClient.CoreV1().ConfigMaps(namespace).Create(context.Background(), configMapScript, metav1.CreateOptions{}) - k8s.WaitUntilConfigMapAvailable(suite.T(), pgKubeCtlOptions, configMapScript.Name, 6, 10*time.Second) - // cleanup existing jobs jobListOptions := metav1.ListOptions{LabelSelector: "app=postgres-client"} existingJobs := k8s.ListJobs(suite.T(), pgKubeCtlOptions, jobListOptions) + backgroundDeletion := metav1.DeletePropagationBackground for _, job := range existingJobs { - err := kubeClient.BatchV1().Jobs(namespace).Delete(context.Background(), job.Name, metav1.DeleteOptions{}) + err := kubeClient.BatchV1().Jobs(auroraNamespace).Delete(context.Background(), job.Name, metav1.DeleteOptions{PropagationPolicy: &backgroundDeletion}) suite.Assert().NoError(err) } // deploy the postgres-client Job to test the connection k8s.KubectlApply(suite.T(), pgKubeCtlOptions, "../../modules/fixtures/postgres-client.yml") - errJob := utils.WaitForJobCompletion(kubeClient, namespace, "postgres-client", 5*time.Minute, jobListOptions) + errJob := utils.WaitForJobCompletion(kubeClient, auroraNamespace, "postgres-client", 5*time.Minute, jobListOptions) suite.Require().NoError(errJob) - // TODO: test IRSA apply https://kubedemy.io/aws-eks-part-13-setup-iam-roles-for-service-accounts-irsa to setup iam - // Retrieve RDS information describeDBClusterInput := &rds.DescribeDBClustersInput{ DBClusterIdentifier: aws.String(varsConfigAurora["cluster_name"].(string)), @@ -290,7 +333,6 @@ func (suite *CustomEKSRDSTestSuite) TestCustomEKSAndRDS() { suite.Assert().Equal(varsConfigAurora["username"].(string), *describeDBClusterOutput.DBClusters[0].MasterUsername) suite.Assert().Equal(auroraDatabase, *describeDBClusterOutput.DBClusters[0].DatabaseName) suite.Assert().Equal(int32(5432), *describeDBClusterOutput.DBClusters[0].Port) - suite.Assert().Equal("15.4", *describeDBClusterOutput.DBClusters[0].EngineVersion) suite.Assert().ElementsMatch(expectedRDSAZ, describeDBClusterOutput.DBClusters[0].AvailabilityZones) suite.Assert().Equal(varsConfigAurora["cluster_name"].(string), *describeDBClusterOutput.DBClusters[0].DBClusterIdentifier) diff --git a/test/src/default_eks_test.go b/test/src/default_eks_test.go index d5df6919..9d2907c6 100644 --- a/test/src/default_eks_test.go +++ b/test/src/default_eks_test.go @@ -121,7 +121,8 @@ func (suite *DefaultEKSTestSuite) TestDefaultEKS() { defer utils.DeferCleanup(suite.T(), suite.bucketRegion, terraformOptions) } - // since v20, we can't use InitAndApplyAndIdempotent due to labels being added + // due 
to output of the creation changing tags from null to {}, we can't pass the + // idempotency test terraform.InitAndApply(suite.T(), terraformOptions) suite.baseChecksEKS(terraformOptions) } diff --git a/test/src/go.mod b/test/src/go.mod index f1d64672..b45ba063 100644 --- a/test/src/go.mod +++ b/test/src/go.mod @@ -9,10 +9,13 @@ require ( github.com/aws/aws-sdk-go-v2/service/eks v1.50.0 github.com/aws/aws-sdk-go-v2/service/iam v1.37.0 github.com/aws/aws-sdk-go-v2/service/kms v1.37.0 + github.com/aws/aws-sdk-go-v2/service/opensearch v1.40.2 github.com/aws/aws-sdk-go-v2/service/rds v1.87.0 github.com/aws/aws-sdk-go-v2/service/s3 v1.65.0 + github.com/aws/aws-sdk-go-v2/service/sts v1.32.0 github.com/aws/smithy-go v1.22.0 github.com/gruntwork-io/terratest v0.47.2 + github.com/sethvargo/go-password v0.3.1 github.com/stretchr/testify v1.9.0 go.uber.org/zap v1.27.0 k8s.io/api v0.31.1 @@ -42,7 +45,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.0 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.24.0 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.32.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect diff --git a/test/src/go.sum b/test/src/go.sum index 60fde8b5..2c4e4986 100644 --- a/test/src/go.sum +++ b/test/src/go.sum @@ -201,242 +201,52 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.54.6 h1:HEYUib3yTt8E6vxjMWM3yAq5b+qjj/6aKA62mkgux9g= github.com/aws/aws-sdk-go v1.54.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8= -github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= -github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g= -github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= -github.com/aws/aws-sdk-go-v2 v1.31.0 h1:3V05LbxTSItI5kUqNwhJrrrY1BAXxXt0sN0l72QmG5U= -github.com/aws/aws-sdk-go-v2 v1.31.0/go.mod h1:ztolYtaEUtdpf9Wftr31CJfLVjOnD/CVRkKOOYgF8hA= github.com/aws/aws-sdk-go-v2 v1.32.0 h1:GuHp7GvMN74PXD5C97KT5D87UhIy4bQPkflQKbfkndg= github.com/aws/aws-sdk-go-v2 v1.32.0/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5 h1:xDAuZTn4IMm8o1LnBZvmrL8JA1io4o3YWNXgohbf20g= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5/go.mod h1:wYSv6iDS621sEFLfKvpPE2ugjTuGlAG7iROg0hLOkfc= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA= -github.com/aws/aws-sdk-go-v2/config v1.27.28 
h1:OTxWGW/91C61QlneCtnD62NLb4W616/NM1jA8LhJqbg= -github.com/aws/aws-sdk-go-v2/config v1.27.28/go.mod h1:uzVRVtJSU5EFv6Fu82AoVFKozJi2ZCY6WRCXj06rbvs= -github.com/aws/aws-sdk-go-v2/config v1.27.30 h1:AQF3/+rOgeJBQP3iI4vojlPib5X6eeOYoa/af7OxAYg= -github.com/aws/aws-sdk-go-v2/config v1.27.30/go.mod h1:yxqvuubha9Vw8stEgNiStO+yZpP68Wm9hLmcm+R/Qk4= -github.com/aws/aws-sdk-go-v2/config v1.27.31 h1:kxBoRsjhT3pq0cKthgj6RU6bXTm/2SgdoUMyrVw0rAI= -github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM= -github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU= -github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks= -github.com/aws/aws-sdk-go-v2/config v1.27.37 h1:xaoIwzHVuRWRHFI0jhgEdEGc8xE1l91KaeRDsWEIncU= -github.com/aws/aws-sdk-go-v2/config v1.27.37/go.mod h1:S2e3ax9/8KnMSyRVNd3sWTKs+1clJ2f1U6nE0lpvQRg= -github.com/aws/aws-sdk-go-v2/config v1.27.39 h1:FCylu78eTGzW1ynHcongXK9YHtoXD5AiiUqq3YfJYjU= -github.com/aws/aws-sdk-go-v2/config v1.27.39/go.mod h1:wczj2hbyskP4LjMKBEZwPRO1shXY+GsQleab+ZXT2ik= github.com/aws/aws-sdk-go-v2/config v1.27.41 h1:esG3WpmEuNJ6F4kVFLumN8nCfA5VBav1KKb3JPx83O4= github.com/aws/aws-sdk-go-v2/config v1.27.41/go.mod h1:haUg09ebP+ClvPjU3EB/xe0HF9PguO19PD2fdjM2X14= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28 h1:m8+AHY/ND8CMHJnPoH7PJIRakWGa4gbfbxuY9TGTUXM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28/go.mod h1:6TF7dSc78ehD1SL6KpRIPKMA1GyyWflIkjqg+qmf4+c= -github.com/aws/aws-sdk-go-v2/credentials v1.17.29 h1:CwGsupsXIlAFYuDVHv1nnK0wnxO0wZ/g1L8DSK/xiIw= -github.com/aws/aws-sdk-go-v2/credentials v1.17.29/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= -github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BTicFEMAi29V1U+L5Q= -github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= -github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I= -github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I= -github.com/aws/aws-sdk-go-v2/credentials v1.17.35 h1:7QknrZhYySEB1lEXJxGAmuD5sWwys5ZXNr4m5oEz0IE= -github.com/aws/aws-sdk-go-v2/credentials v1.17.35/go.mod h1:8Vy4kk7at4aPSmibr7K+nLTzG6qUQAUO4tW49fzUV4E= -github.com/aws/aws-sdk-go-v2/credentials v1.17.37 h1:G2aOH01yW8X373JK419THj5QVqu9vKEwxSEsGxihoW0= -github.com/aws/aws-sdk-go-v2/credentials v1.17.37/go.mod h1:0ecCjlb7htYCptRD45lXJ6aJDQac6D2NlKGpZqyTG6A= github.com/aws/aws-sdk-go-v2/credentials v1.17.39 h1:tmVexAhoGqJxNE2oc4/SJqL+Jz1x1iCPt5ts9XcqZCU= github.com/aws/aws-sdk-go-v2/credentials v1.17.39/go.mod h1:zgOdbDI9epE608PdboJ87CYvPIejAgFevazeJW6iauQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 h1:C/d03NAmh8C4BZXhuRNboF/DqhBkBCeDiJDcaqIT5pA= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14/go.mod h1:7I0Ju7p9mCIdlrfS+JCgqcYD0VXz/N4yozsox+0o078= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.15 h1:kGjlNc2IXXcxPDcfMyCshNCjVgxUhC/vTJv7NvC9wKk= github.com/aws/aws-sdk-go-v2/feature/ec2/imds 
v1.16.15/go.mod h1:rk/HmqPo+dX0Uv0Q1+4w3QKFdICEGSsTYz1hRWvH8UI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 h1:kYQ3H1u0ANr9KEKlGs/jTLrBFPo8P8NaH/w7A01NeeM= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18/go.mod h1:r506HmK5JDUh9+Mw4CfGJGSSoqIiLCndAuqXuhbv67Y= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.19 h1:Q/k5wCeJkSWs+62kDfOillkNIJ5NqmE3iOfm48g/W8c= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.19/go.mod h1:Wns1C66VvtA2Bv/cUBuKZKQKdjo7EVMhp90aAa+8oTI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 h1:Z7IdFUONvTcvS7YuhtVxN99v2cCoHRXOS4mTr0B/pUc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18/go.mod h1:DkKMmksZVVyat+Y+r1dEOgJEfUeA7UngIHWeKsi0yNc= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.19 h1:AYLE0lUfKvN6icFTR/p+NmD1amYKTbqHQ1Nm+jwE6BM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.19/go.mod h1:1giLakj64GjuH1NBzF/DXqly5DWHtMTaOzRZ53nFX0I= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16 h1:mimdLQkIX1zr8GIPY1ZtALdBQGxcASiBd2MOp8m/dMc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16/go.mod h1:YHk6owoSwrIsok+cAH9PENCOGoH5PU2EllX4vLtSrsY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17 h1:Roo69qTpfu8OlJ2Tb7pAYVuF0CpuUMB0IYWwYP/4DZM= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17/go.mod h1:NcWPxQzGM1USQggaTVwz6VpqMZPX1CvDJLDh6jnOCa4= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18 h1:OWYvKL53l1rbsUmW7bQyJVsYU/Ii3bbAAQIIFNbM0Tk= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18/go.mod h1:CUx0G1v3wG6l01tUB+j7Y8kclA8NSqK4ef0YG79a4cg= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.19 h1:FKdiFzTxlTRO71p0C7VrLbkkdW8qfMKF5+ej6bTmkT0= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.19/go.mod h1:abO3pCj7WLQPTllnSeYImqFfkGrmJV0JovWo/gqT5N0= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.175.1 h1:7B5ppg4i5N2B6t+aH77WLbAu8sD98MLlzruWzq5scyY= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.175.1/go.mod h1:ISODge3zgdwOEa4Ou6WM9PKbxJWJ15DYKnr2bfmCAIA= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.176.0 h1:fWhkSvaQqa5eWiRwBw10FUnk1YatAQ9We4GdGxKiCtg= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.176.0/go.mod h1:ISODge3zgdwOEa4Ou6WM9PKbxJWJ15DYKnr2bfmCAIA= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.0 
h1:LAdDRIj5BEZM9fLDTUWUyPzWvv5A++nCEps/RGmZNOo= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.0/go.mod h1:ISODge3zgdwOEa4Ou6WM9PKbxJWJ15DYKnr2bfmCAIA= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.2 h1:QUUvxEs9q1DsYCaWaRrV8i7n82Adm34jrHb6OPjXPqc= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.2/go.mod h1:TFSALWR7Xs7+KyMM87ZAYxncKFBvzEt2rpK/BJCH2ps= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.3 h1:dqdCh1M8h+j8OGNUpxTs7eBPFr6lOdLpdlE6IPLLSq4= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.3/go.mod h1:TFSALWR7Xs7+KyMM87ZAYxncKFBvzEt2rpK/BJCH2ps= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.178.0 h1:yCVmlqH1bWVmdS/oFyyM+hbe2c+tKGPo6r0BHhTpn1U= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.178.0/go.mod h1:W6sNzs5T4VpZn1Vy+FMKw8s24vt5k6zPJXcNOK0asBo= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.179.2 h1:rGBv2N0zWvNTKnxOfbBH4mNM8WMdDNkaxdqtz152G40= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.179.2/go.mod h1:W6sNzs5T4VpZn1Vy+FMKw8s24vt5k6zPJXcNOK0asBo= github.com/aws/aws-sdk-go-v2/service/ec2 v1.181.0 h1:YzSOMQYRZQKuLz/bD6illIGwJfa1WFfeFAZM5Zr5LB8= github.com/aws/aws-sdk-go-v2/service/ec2 v1.181.0/go.mod h1:CudaKF0Yu5+ZfKMiiPdtJ/kOOBty7CIEJUhESP52e9M= -github.com/aws/aws-sdk-go-v2/service/eks v1.48.1 h1:KZ1GkevaklMvPxcqivG4UDwar3lqMSpbK9RpZowjMec= -github.com/aws/aws-sdk-go-v2/service/eks v1.48.1/go.mod h1:fff5mmwLCVxyXCojYjPY34sUGvWtXCD325yRL5qHAVs= -github.com/aws/aws-sdk-go-v2/service/eks v1.48.2 h1:EFjJfHrl7/2qh/ZawUXtl9juOPAUUOTFDLOmov5KSgM= -github.com/aws/aws-sdk-go-v2/service/eks v1.48.2/go.mod h1:fff5mmwLCVxyXCojYjPY34sUGvWtXCD325yRL5qHAVs= -github.com/aws/aws-sdk-go-v2/service/eks v1.48.4 h1:rgYF107dG64XdYhQ1N0ac2G+8L3I+fD4Vsw8zz9wOKA= -github.com/aws/aws-sdk-go-v2/service/eks v1.48.4/go.mod h1:9dn8p15siUL80NCTPVNd+YvEpVTmWO+rboGx6qOMBa0= -github.com/aws/aws-sdk-go-v2/service/eks v1.49.0 h1:soZyFrtL96yjSG8htIcdSlunboFzp7BidxHn2SMlbJ4= -github.com/aws/aws-sdk-go-v2/service/eks v1.49.0/go.mod h1:QUjwO93Ri00egMAeWw75dviZBM5pECLx0KNeNaBtTIM= -github.com/aws/aws-sdk-go-v2/service/eks v1.49.1 h1:1EJ49JWtC3wS/rImBX/6RAna2gEhBqYWYFpETSmPsVs= -github.com/aws/aws-sdk-go-v2/service/eks v1.49.1/go.mod h1:QUjwO93Ri00egMAeWw75dviZBM5pECLx0KNeNaBtTIM= -github.com/aws/aws-sdk-go-v2/service/eks v1.49.3 h1:4Aq01bwq1RnyMLAgx/6kB8cqvfLlQet5cWY3MVhlsqU= -github.com/aws/aws-sdk-go-v2/service/eks v1.49.3/go.mod h1:QUjwO93Ri00egMAeWw75dviZBM5pECLx0KNeNaBtTIM= github.com/aws/aws-sdk-go-v2/service/eks v1.50.0 h1:eL4AEDwVx29t+B7dkcuL/3W+RQKR64PPbfQVQTs8FEs= github.com/aws/aws-sdk-go-v2/service/eks v1.50.0/go.mod h1:0C9DxOpj1d8GioesPAKXMob9X2lyFepeL6C5z9oA4HM= -github.com/aws/aws-sdk-go-v2/service/iam v1.35.0 h1:xIjTizH74aMNQBjp9D5cvjRZmOYtnrpjOGU3xkVqrjk= -github.com/aws/aws-sdk-go-v2/service/iam v1.35.0/go.mod h1:IdHqqRLKgxYR4IY7Omd7SuV4SJzJ8seF+U5PW+mvtP4= -github.com/aws/aws-sdk-go-v2/service/iam v1.35.2 h1:CK5cIZTxza9ki/4eghMeLk32/UeVcPgyDBNiFfbcG0U= -github.com/aws/aws-sdk-go-v2/service/iam v1.35.2/go.mod h1:PpmEOH3ZTQlDAezieBVdFMjPO1jovUMNPA4OpCtnwbY= -github.com/aws/aws-sdk-go-v2/service/iam v1.36.0 h1:3xvpoYVNxINJ26xmjvYFC7T4jiTICCdxwREEu0G7vqg= -github.com/aws/aws-sdk-go-v2/service/iam v1.36.0/go.mod h1:HSvujsK8xeEHMIB18oMXjSfqaN9cVqpo/MtHJIksQRk= -github.com/aws/aws-sdk-go-v2/service/iam v1.36.1 h1:uBOxRx7j+9NoCkmQ2Nmmh/KvKm1l+wm917By8bgtKdU= -github.com/aws/aws-sdk-go-v2/service/iam v1.36.1/go.mod h1:HSvujsK8xeEHMIB18oMXjSfqaN9cVqpo/MtHJIksQRk= -github.com/aws/aws-sdk-go-v2/service/iam v1.36.3 h1:dV9iimLEHKYAz2qTi+tGAD9QCnAG2pLD7HUEHB7m4mI= -github.com/aws/aws-sdk-go-v2/service/iam 
v1.36.3/go.mod h1:HSvujsK8xeEHMIB18oMXjSfqaN9cVqpo/MtHJIksQRk= github.com/aws/aws-sdk-go-v2/service/iam v1.37.0 h1:FLdmwEJUDWdAflqxRNkIKNZki8dFmi5SUeTjAjxrdJU= github.com/aws/aws-sdk-go-v2/service/iam v1.37.0/go.mod h1:Xctz/06SeHDUc3ZheMxXekSZ2rx0RX9SVhV5JeQgoqY= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 h1:QFASJGfT8wMXtuP3D5CRmMjARHv9ZmzFUMJznHDOY3w= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5/go.mod h1:QdZ3OmoIjSX+8D1OPAzPxDfjXASbBMDsz9qvtyIhtik= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 h1:GckUnpm4EJOAio1c8o25a+b3lVfwVzC9gnSBqiiNmZM= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18/go.mod h1:Br6+bxfG33Dk3ynmkhsW2Z/t9D4+lRqdLDNCKi85w0U= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19 h1:FLMkfEiRjhgeDTCjjLoc3URo/TBkgeQbocA78lfkzSI= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19/go.mod h1:Vx+GucNSsdhaxs3aZIKfSUjKVGsxN25nX2SRcdhuw08= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20 h1:rTWjG6AvWekO2B1LHeM3ktU7MqyX9rzWQ7hgzneZW7E= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20/go.mod h1:RGW2DDpVc8hu6Y6yG8G5CHVmVOAn1oV8rNKOHRJyswg= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.0 h1:FQNWhRuSq8QwW74GtU0MrveNhZbqvHsA4dkA9w8fTDQ= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.0/go.mod h1:j/zZ3zmWfGCK91K73YsfHP53BSTLSjL/y6YN39XbBLM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHCiSH0jyd6gROjlJtNwov0eGYNz8s8nFcR0jQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 h1:Xbwbmk44URTiHNx6PNo0ujDE6ERlsCKJD3u1zfnzAPg= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20/go.mod h1:oAfOFzUB14ltPZj1rWwRc3d/6OgD76R8KlvU3EqM9Fg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.0 h1:AdbiDUgQZmM28rDIZbiSwFxz8+3B94aOXxzs6oH+EA0= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.0/go.mod h1:uV476Bd80tiDTX4X2redMtagQUg65aU/gzPojSJ4kSI= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod 
h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17 h1:u+EfGmksnJc/x5tq3A+OD7LrMbSSR/5TrKLvkdy/fhY= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17/go.mod h1:VaMx6302JHax2vHJWgRo+5n9zvbacs3bLU/23DNQrTY= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18 h1:eb+tFOIl9ZsUe2259/BKPeniKuz4/02zZFH/i4Nf8Rg= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18/go.mod h1:GVCC2IJNJTmdlyEsSmofEy7EfJncP7DNnXDzRjJ5Keg= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.0 h1:1NKXS8XfhMM0bg5wVYa/eOH8AM2f6JijugbKEyQFTIg= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.0/go.mod h1:ph931DUfVfgrhZR7py9olSvHCiRpvaGxNvlWBcXxFds= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.4 h1:mG1MH6yPwT5gNEeBrhig3FHc4mK0QaZOXsmQUbphP6Y= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.4/go.mod h1:A5CS0VRmxxj2YKYLCY08l/Zzbd01m6JZn0WzxgT1OCA= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.5 h1:XUomV7SiclZl1QuXORdGcfFqHxEHET7rmNGtxTfNB+M= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.5/go.mod h1:A5CS0VRmxxj2YKYLCY08l/Zzbd01m6JZn0WzxgT1OCA= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.7 h1:v0D1LeMkA/X+JHAZWERrr+sUGOt8KrCZKnJA6KszkcE= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.7/go.mod h1:K9lwD0Rsx9+NSaJKsdAdlDK4b2G4KKOEve9PzHxPoMI= -github.com/aws/aws-sdk-go-v2/service/kms v1.36.0 h1:jwWMpQ/1obJRdHaix9k10zWSnSMZGdDTZIDiS5CGzq8= -github.com/aws/aws-sdk-go-v2/service/kms v1.36.0/go.mod h1:OHmlX4+o0XIlJAQGAHPIy0N9yZcYS/vNG+T7geSNcFw= -github.com/aws/aws-sdk-go-v2/service/kms v1.36.1 h1:BkicHsJOtGRLSGw2CSvtbdGlMboP8S/AsWzf0U2V6m8= -github.com/aws/aws-sdk-go-v2/service/kms v1.36.1/go.mod h1:OHmlX4+o0XIlJAQGAHPIy0N9yZcYS/vNG+T7geSNcFw= -github.com/aws/aws-sdk-go-v2/service/kms v1.36.3 h1:iHi6lC6LfW6SNvB2bixmlOW3WMyWFrHZCWX+P+CCxMk= -github.com/aws/aws-sdk-go-v2/service/kms v1.36.3/go.mod h1:OHmlX4+o0XIlJAQGAHPIy0N9yZcYS/vNG+T7geSNcFw= github.com/aws/aws-sdk-go-v2/service/kms v1.37.0 h1:ovrHGOiNu4S0GSMeexZlsMhBkUb3bCE3iOktFZ7rmBU= github.com/aws/aws-sdk-go-v2/service/kms v1.37.0/go.mod h1:YLqfMkq9GWbICgqT5XMIzT8I2+MxVKodTnNBo3BONgE= -github.com/aws/aws-sdk-go-v2/service/rds v1.82.1 h1:4s+9AtQQGB5n0xMm0xRbIQOFoi6rrggMlFt8WwHcDvs= -github.com/aws/aws-sdk-go-v2/service/rds v1.82.1/go.mod h1:hfUZhydujCniydsJdzZ9bwzX6nUvbfnhhYQeFNREC2I= -github.com/aws/aws-sdk-go-v2/service/rds v1.82.2 h1:kO/fQcueYZvuL5kPzTPQ503cKZj8jyBNg1MlnIqpFPg= -github.com/aws/aws-sdk-go-v2/service/rds v1.82.2/go.mod h1:hfUZhydujCniydsJdzZ9bwzX6nUvbfnhhYQeFNREC2I= -github.com/aws/aws-sdk-go-v2/service/rds v1.82.4 h1:Go6suRegLmIpQiuiTNyUUyxYrhzbrliD9wD0ZN65hlQ= -github.com/aws/aws-sdk-go-v2/service/rds v1.82.4/go.mod h1:zNFNa99yH2j3zzqZgt3Atu197K1UkE+1sfigpi5+eWo= -github.com/aws/aws-sdk-go-v2/service/rds v1.82.5 h1:MB+bWfwWE/FnTGBO0yxXbdSMvU/753OHVdMbMgQezxI= -github.com/aws/aws-sdk-go-v2/service/rds v1.82.5/go.mod h1:zNFNa99yH2j3zzqZgt3Atu197K1UkE+1sfigpi5+eWo= -github.com/aws/aws-sdk-go-v2/service/rds v1.84.0 h1:y7CROMOdAjkkijg+ClGBa2KnhL7oeOP0mmBFJMSCWPc= -github.com/aws/aws-sdk-go-v2/service/rds v1.84.0/go.mod h1:lhiPj6RvoJHWG2STp+k5az55YqGgFLBzkKYdYHgUh9g= -github.com/aws/aws-sdk-go-v2/service/rds v1.85.2 h1:KDO/FSO8V+zlvnQF6v4nOariw2qwPx5/z2pyb6X7ibk= -github.com/aws/aws-sdk-go-v2/service/rds v1.85.2/go.mod h1:lhiPj6RvoJHWG2STp+k5az55YqGgFLBzkKYdYHgUh9g= +github.com/aws/aws-sdk-go-v2/service/opensearch v1.40.2 h1:tQMi7jzkFcuLobVKrW4edPnnreXLNaHRJKgLutxvPdY= +github.com/aws/aws-sdk-go-v2/service/opensearch v1.40.2/go.mod 
h1:4rB9oWpduMw/+UqL/WdNLJZNF7iAwaJWwJ6GgsQqOjg= github.com/aws/aws-sdk-go-v2/service/rds v1.87.0 h1:f7u5jzUHaIIn5F121ortA0g2yDDWiPeTw2lWrgk9+ZA= github.com/aws/aws-sdk-go-v2/service/rds v1.87.0/go.mod h1:agnQGhYbHXxPM2+zZH4WZIpki6IDU6zFGzfOlnu+1Ow= -github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 h1:sZXIzO38GZOU+O0C+INqbH7C2yALwfMWpd64tONS/NE= -github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE= -github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 h1:mx2ucgtv+MWzJesJY9Ig/8AFHgoE5FwLXwUVgW/FGdI= -github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= -github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 h1:Wb544Wh+xfSXqJ/j3R4aX9wrKUoZsJNmilBYZb3mKQ4= -github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= -github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2 h1:Kp6PWAlXwP1UvIflkIP6MFZYBNDCa4mFCGtxrpICVOg= -github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2/go.mod h1:5FmD/Dqq57gP+XwaUnd5WFPipAuzrf0HmupX27Gvjvc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.63.0 h1:F6KG9CT7PPqAjnRxjKmYJopVnXPwjlzPI2FEgXHajNY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.63.0/go.mod h1:NLTqRLe3pUNu3nTEHI6XlHLKYmc8fbHUdMxAB6+s41Q= -github.com/aws/aws-sdk-go-v2/service/s3 v1.63.1 h1:TR96r56VwELV0qguNFCuz+/bEpRfnR3ZsS9/IG05C7Q= -github.com/aws/aws-sdk-go-v2/service/s3 v1.63.1/go.mod h1:NLTqRLe3pUNu3nTEHI6XlHLKYmc8fbHUdMxAB6+s41Q= -github.com/aws/aws-sdk-go-v2/service/s3 v1.63.3 h1:3zt8qqznMuAZWDTDpcwv9Xr11M/lVj2FsRR7oYBt0OA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.63.3/go.mod h1:NLTqRLe3pUNu3nTEHI6XlHLKYmc8fbHUdMxAB6+s41Q= github.com/aws/aws-sdk-go-v2/service/s3 v1.65.0 h1:2dSm7frMrw2tdJ0QvyccQNJyPGaP24dyDgZ6h1QJMGU= github.com/aws/aws-sdk-go-v2/service/s3 v1.65.0/go.mod h1:4XSVpw66upN8wND3JZA29eXl2NOZvfFVq7DIP6xvfuQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.23.1 h1:2jrVsMHqdLD1+PA4BA6Nh1eZp0Gsy3mFSB5MxDvcJtU= -github.com/aws/aws-sdk-go-v2/service/sso v1.23.1/go.mod h1:XRlMvmad0ZNL+75C5FYdMvbbLkd6qiqz6foR1nA1PXY= -github.com/aws/aws-sdk-go-v2/service/sso v1.23.3 h1:rs4JCczF805+FDv2tRhZ1NU0RB2H6ryAvsWPanAr72Y= -github.com/aws/aws-sdk-go-v2/service/sso v1.23.3/go.mod h1:XRlMvmad0ZNL+75C5FYdMvbbLkd6qiqz6foR1nA1PXY= github.com/aws/aws-sdk-go-v2/service/sso v1.24.0 h1:71FvP6XFj53NK+YiAEGVzeiccLVeFnHOCvMig0zOHsE= github.com/aws/aws-sdk-go-v2/service/sso v1.24.0/go.mod h1:UVJqtKXSd9YppRKgdBIkyv7qgbSGv5DchM3yX0BN2mU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 h1:/Cfdu0XV3mONYKaOt1Gr0k1KvQzkzPyiKUdlWJqy+J4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1 h1:0L7yGCg3Hb3YQqnSgBTZM5wepougtL1aEccdcdYhHME= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1/go.mod h1:FnvDM4sfa+isJ3kDXIzAB9GAwVSzFzSy97uZ3IsHo4E= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.3 
h1:S7EPdMVZod8BGKQQPTBK+FcX9g7bKR7c4+HxWqHP7Vg= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.3/go.mod h1:FnvDM4sfa+isJ3kDXIzAB9GAwVSzFzSy97uZ3IsHo4E= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.0 h1:Uco4o19bi3AmBapImNzuMk+rfzlui52BDyVK1UfJeRA= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.0/go.mod h1:+HLFhCpnG08hBee8bUdfd1mBK+rFKPt4O5igR9lXDfk= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 h1:iAckBT2OeEK/kBDyN/jDtpEExhjeeA/Im2q4X0rJZT8= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o= -github.com/aws/aws-sdk-go-v2/service/sts v1.31.1 h1:8K0UNOkZiK9Uh3HIF6Bx0rcNCftqGCeKmOaR7Gp5BSo= -github.com/aws/aws-sdk-go-v2/service/sts v1.31.1/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI= -github.com/aws/aws-sdk-go-v2/service/sts v1.31.3 h1:VzudTFrDCIDakXtemR7l6Qzt2+JYsVqo2MxBPt5k8T8= -github.com/aws/aws-sdk-go-v2/service/sts v1.31.3/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI= github.com/aws/aws-sdk-go-v2/service/sts v1.32.0 h1:GiQUjZM2KUZX68o/LpZ1xqxYMuvoxpRrOwYARYog3vc= github.com/aws/aws-sdk-go-v2/service/sts v1.32.0/go.mod h1:dKnu7M4MAS2SDlng1ytxd03H+y0LoUfEQ5E2VaaSw/4= -github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= -github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/aws/smithy-go v1.21.0 h1:H7L8dtDRk0P1Qm6y0ji7MCYMQObJ5R9CRpyPhRUkLYA= -github.com/aws/smithy-go v1.21.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -625,18 +435,12 @@ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRaxEM6G0ro= github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78= -github.com/gruntwork-io/terratest v0.47.0 h1:xIy1pT7NbGVlMLDZEHl3+3iSnvffh8tN2pL6idn448c= -github.com/gruntwork-io/terratest v0.47.0/go.mod h1:oywHw1cFKXSYvKPm27U7quZVzDUlA22H2xUrKCe26xM= -github.com/gruntwork-io/terratest v0.47.1 h1:qOaxnL7Su5+KpDHYUN/ek1jn8ImvCKtOkaY4OSMS4tI= -github.com/gruntwork-io/terratest v0.47.1/go.mod h1:LnYX8BN5WxUMpDr8rtD39oToSL4CBERWSCusbJ0d/64= github.com/gruntwork-io/terratest v0.47.2 h1:t6iWwsqJH7Gx0RwXleU/vjc+2c0JXRMdj3DxYXTBssQ= github.com/gruntwork-io/terratest v0.47.2/go.mod h1:LnYX8BN5WxUMpDr8rtD39oToSL4CBERWSCusbJ0d/64= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-getter v1.7.5 
h1:dT58k9hQ/vbxNMwoI5+xFYAJuv6152UNvdHokfI5wE4= -github.com/hashicorp/go-getter v1.7.5/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= github.com/hashicorp/go-getter v1.7.6 h1:5jHuM+aH373XNtXl9TNTUH5Qd69Trve11tHIrB+6yj4= github.com/hashicorp/go-getter v1.7.6/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= @@ -743,6 +547,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sethvargo/go-password v0.3.1 h1:WqrLTjo7X6AcVYfC6R7GtSyuUQR9hGyAj/f1PYQZCJU= +github.com/sethvargo/go-password v0.3.1/go.mod h1:rXofC1zT54N7R8K/h1WDUdkf9BOx5OptoxrMBcrXzvs= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -1337,16 +1143,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= -k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= -k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= -k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= -k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= @@ -1358,12 +1158,6 @@ k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/aws-iam-authenticator v0.6.23 h1:efBMSbBx8XSw8zLeh0tQEOtWGZIS2d/NXqydFoMjGSM= -sigs.k8s.io/aws-iam-authenticator v0.6.23/go.mod h1:8CAmUtqsLmv5QvnhXQ2+byy1EL+TCDyyYTGFXDyt0sk= -sigs.k8s.io/aws-iam-authenticator v0.6.25 h1:ndRRNTQgoxhpZqMHgy1MO+CLrHq3rb+/0xG07cuoc8s= -sigs.k8s.io/aws-iam-authenticator v0.6.25/go.mod h1:8CAmUtqsLmv5QvnhXQ2+byy1EL+TCDyyYTGFXDyt0sk= -sigs.k8s.io/aws-iam-authenticator v0.6.26 h1:cA/xsMLEG81nrSdGW2j4wBBQN50EvDTB9QT4dFJ2VAg= -sigs.k8s.io/aws-iam-authenticator v0.6.26/go.mod h1:8CAmUtqsLmv5QvnhXQ2+byy1EL+TCDyyYTGFXDyt0sk= sigs.k8s.io/aws-iam-authenticator v0.6.27 
h1:uzSwFYh+hrrbpv7goZ+2FN/2oCQddiKpb8l5vBbY1i4=
sigs.k8s.io/aws-iam-authenticator v0.6.27/go.mod h1:8CAmUtqsLmv5QvnhXQ2+byy1EL+TCDyyYTGFXDyt0sk=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
diff --git a/test/src/upgrade_eks_test.go b/test/src/upgrade_eks_test.go
index 9e1494a8..1675327a 100644
--- a/test/src/upgrade_eks_test.go
+++ b/test/src/upgrade_eks_test.go
@@ -122,7 +122,8 @@ func (suite *UpgradeEKSTestSuite) TestUpgradeEKS() {
 		defer utils.DeferCleanup(suite.T(), suite.bucketRegion, terraformOptions)
 	}
 
-	// since v20, we can't use InitAndApplyAndIdempotent due to labels being added
+	// due to output of the creation changing tags from null to {}, we can't pass the
+	// idempotency test
 	terraform.InitAndApply(suite.T(), terraformOptions)
 
 	sess, err := utils.GetAwsClient()
@@ -211,7 +212,8 @@ func (suite *UpgradeEKSTestSuite) TestUpgradeEKS() {
 		defer utils.DeferCleanup(suite.T(), suite.bucketRegion, terraformOptions)
 	}
 
-	// since v20, we can't use InitAndApplyAndIdempotent due to labels being added
+	// due to output of the creation changing tags from null to {}, we can't pass the
+	// idempotency test
 	terraform.InitAndApply(suite.T(), terraformOptions)
 
 	errClusterReady = utils.WaitUntilKubeClusterIsReady(result.Cluster, 5*time.Minute, uint64(suite.expectedNodes))
diff --git a/test/src/utils/aws.go b/test/src/utils/aws.go
index 2db4d367..be2dd388 100644
--- a/test/src/utils/aws.go
+++ b/test/src/utils/aws.go
@@ -12,6 +12,7 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	types2 "github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"net/http"
+	"strings"
 	"time"
 )
 
@@ -159,3 +160,15 @@ func DeleteObjectFromS3Bucket(sess aws.Config, s3Bucket string, objectToDelete s
 	fmt.Printf("Successfully deleted object %q from bucket %q\n", objectToDelete, s3Bucket)
 	return nil
 }
+
+// ExtractOIDCProviderID extracts the OIDC provider ID from the EKS cluster result (without the scheme, e.g. no https://).
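+// For example, an issuer of "https://oidc.eks.eu-west-1.amazonaws.com/id/EXAMPLE0123456789"
+// becomes "oidc.eks.eu-west-1.amazonaws.com/id/EXAMPLE0123456789", which is the form IAM expects
+// in a trust policy's Federated principal and in the "<issuer>:sub" condition key.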
+func ExtractOIDCProviderID(clusterResult *eks.DescribeClusterOutput) (string, error) {
+	if clusterResult == nil || clusterResult.Cluster == nil || clusterResult.Cluster.Identity == nil || clusterResult.Cluster.Identity.Oidc == nil || clusterResult.Cluster.Identity.Oidc.Issuer == nil {
+		return "", fmt.Errorf("invalid cluster result")
+	}
+
+	return strings.TrimPrefix(*clusterResult.Cluster.Identity.Oidc.Issuer, "https://"), nil
+}
diff --git a/test/src/utils/kube.go b/test/src/utils/kube.go
index 015b9655..c7bc2b5a 100644
--- a/test/src/utils/kube.go
+++ b/test/src/utils/kube.go
@@ -74,6 +74,28 @@ func CreateIfNotExistsNamespace(t *testing.T, kubeCtlOptions *k8s.KubectlOptions
 	}
 }
 
+func CreateIfNotExistsServiceAccount(t *testing.T, kubeCtlOptions *k8s.KubectlOptions, serviceAccountName string, annotations map[string]string) {
+	_, errFindSA := k8s.GetServiceAccountE(t, kubeCtlOptions, serviceAccountName)
+	if errFindSA != nil {
+		if errors.IsNotFound(errFindSA) {
+			// Create service account with annotations if it does not exist
+			serviceAccount := &corev1.ServiceAccount{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        serviceAccountName,
+					Namespace:   kubeCtlOptions.Namespace,
+					Annotations: annotations,
+				},
+			}
+			clientset, errClient := k8s.GetKubernetesClientFromOptionsE(t, kubeCtlOptions)
+			require.NoError(t, errClient)
+			_, errSA := clientset.CoreV1().ServiceAccounts(kubeCtlOptions.Namespace).Create(context.Background(), serviceAccount, metav1.CreateOptions{})
+			require.NoError(t, errSA)
+		} else {
+			require.NoError(t, errFindSA)
+		}
+	}
+}
+
 func GenerateKubeConfigFromAWS(t *testing.T, region, clusterName, awsProfile, configOutputPath string) {
 	cmd := exec.Command("aws", "eks", "--region", region, "update-kubeconfig", "--name", clusterName, "--profile", awsProfile, "--kubeconfig", configOutputPath)
 	_, errCmdKubeProfile := cmd.Output()
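For context on what the `postgres-client` fixture exercises: inside the pod, the IRSA annotation on the service account lets the AWS default credential chain resolve a web identity, which is exchanged for a short-lived RDS IAM auth token used in place of a password. Below is a minimal, self-contained sketch of that flow. It is not part of this PR: the `AURORA_*` environment variable names are illustrative stand-ins for the values published in the `aurora-config` ConfigMap, and the pgx driver is an arbitrary choice.

```go
// iam_db_auth_sketch.go — NOT part of this PR; a sketch of the IAM DB auth
// flow the postgres-client Job performs conceptually.
package main

import (
	"context"
	"database/sql"
	"fmt"
	"net/url"
	"os"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/rds/auth"
	_ "github.com/jackc/pgx/v5/stdlib" // database/sql driver
)

func main() {
	ctx := context.Background()

	// Illustrative env vars mirroring the aurora-config ConfigMap keys.
	endpoint := os.Getenv("AURORA_ENDPOINT") + ":5432" // BuildAuthToken wants host:port
	region := os.Getenv("AWS_REGION")
	user := os.Getenv("AURORA_USERNAME_IRSA") // the IAM-mapped DB user, e.g. "myirsauser"
	dbName := os.Getenv("AURORA_DB_NAME")

	// Inside the pod, the default chain picks up the web identity token that
	// IRSA projects via the service account annotation; no static secret involved.
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region))
	if err != nil {
		panic(err)
	}

	// Exchange those credentials for a short-lived token, authorized only for
	// the rds-db:connect action granted by the Aurora access policy above.
	token, err := auth.BuildAuthToken(ctx, endpoint, region, user, cfg.Credentials)
	if err != nil {
		panic(err)
	}

	// The token contains URL-significant characters, so escape it in the DSN.
	dsn := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=require",
		user, url.QueryEscape(token), endpoint, dbName)

	db, err := sql.Open("pgx", dsn)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if err := db.PingContext(ctx); err != nil {
		panic(err)
	}
	fmt.Println("connected to Aurora with an IAM auth token")
}
```

Since the token expires after roughly 15 minutes, it should be built immediately before opening the connection rather than cached.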