diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index a781456d..5c07f878 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -27,7 +27,6 @@ "ms-vscode.azurecli", "ms-azuretools.vscode-docker", "aaron-bond.better-comments", - "coenraads.bracket-pair-colorizer-2", "eamodio.gitlens", "ms-kubernetes-tools.vscode-kubernetes-tools", "yzhang.markdown-all-in-one", diff --git a/.devcontainer/library-scripts/azcli-debian.sh b/.devcontainer/library-scripts/azcli-debian.sh index cde7fd7c..8e265ff4 100644 --- a/.devcontainer/library-scripts/azcli-debian.sh +++ b/.devcontainer/library-scripts/azcli-debian.sh @@ -30,4 +30,4 @@ echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ $(lsb_rel curl -sL https://packages.microsoft.com/keys/microsoft.asc | (OUT=$(apt-key add - 2>&1) || echo $OUT) apt-get update apt-get install -y azure-cli -echo "Done!" \ No newline at end of file +echo "Done!" diff --git a/.devcontainer/library-scripts/docker-debian.sh b/.devcontainer/library-scripts/docker-debian.sh index 7ca6d544..d046837d 100644 --- a/.devcontainer/library-scripts/docker-debian.sh +++ b/.devcontainer/library-scripts/docker-debian.sh @@ -178,4 +178,4 @@ exec "\$@" EOF chmod +x /usr/local/share/docker-init.sh chown ${USERNAME}:root /usr/local/share/docker-init.sh -echo "Done!" \ No newline at end of file +echo "Done!" diff --git a/.devcontainer/library-scripts/node-debian.sh b/.devcontainer/library-scripts/node-debian.sh index d61046d2..a1b747e8 100644 --- a/.devcontainer/library-scripts/node-debian.sh +++ b/.devcontainer/library-scripts/node-debian.sh @@ -121,4 +121,4 @@ EOF ) | tee -a /etc/bash.bashrc >> /etc/zsh/zshrc fi -echo "Done!" \ No newline at end of file +echo "Done!" 
diff --git a/.github/workflows/TriggerCustomAction.ps1 b/.github/workflows/TriggerCustomAction.ps1 index 91d16fe7..3c11c1f3 100644 --- a/.github/workflows/TriggerCustomAction.ps1 +++ b/.github/workflows/TriggerCustomAction.ps1 @@ -34,7 +34,7 @@ param( $GithubUserName = "adamrushuk", - $GithubRepo = "aks-nexus-velero", + $GithubRepo = "devops-lab", [ValidateSet("test", "build", "deallocate_vmss", "start_vmss", "destroy")] $CustomEventAction = "test" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index bf8f00fc..936c9678 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -7,10 +7,6 @@ name: build # name of GitHub event that triggers workflow # https://help.github.com/en/actions/reference/events-that-trigger-workflows#watch-event-watch on: - # trigger when I star my own repo - watch: - types: [started] - # trigger via webhook # https://github.com/adamrushuk/devops-lab/blob/master/TriggerCustomAction.ps1#L28 repository_dispatch: @@ -21,6 +17,12 @@ on: workflow_dispatch: inputs: {} +# permissions for oidc login +# https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect +permissions: + id-token: write # This is required for requesting the JWT + contents: read # This is required for actions/checkout + # global environment variables # https://help.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables env: @@ -28,11 +30,10 @@ env: PREFIX: arshz # debug - CI_DEBUG: false + CI_DEBUG: true - # azure creds + # azure creds (used with OIDC auth) ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }} - ARM_CLIENT_SECRET: ${{ secrets.ARM_CLIENT_SECRET }} ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }} ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }} @@ -46,7 +47,7 @@ env: FORCE_TEST_FAIL: false K8S_TLS_SECRET_NAME: tls-secret KEY_VAULT_CERT_NAME: wildcard-thehypepipe-co-uk - KEY_VAULT_NAME: kv-rush-iz6y + KEY_VAULT_NAME: kv-rush-aqy2 
KEY_VAULT_RESOURCE_GROUP_NAME: rg-keyvault-acmebot # NOTE: "eastus" is cheaper than "uksouth" LOCATION: eastus @@ -59,14 +60,16 @@ env: # terraform TF_IN_AUTOMATION: "true" TF_INPUT: "false" + # https://developer.hashicorp.com/terraform/internals/debugging + TF_LOG: "ERROR" # TRACE, DEBUG, INFO, WARN or ERROR TF_PLAN: "tfplan" # https://github.com/hashicorp/terraform/releases - TF_VERSION: "1.0.10" + TF_VERSION: "1.3.7" TF_WORKING_DIR: ./terraform - # https://github.com/terraform-linters/tflint-ruleset-azurerm/releases - TFLINT_RULESET_AZURERM_VERSION: "v0.13.2" - # https://github.com/terraform-linters/tflint/releases - TFLINT_VERSION: "v0.33.1" + # azurerm provider oidc + # https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/guides/service_principal_oidc + # https://developer.hashicorp.com/terraform/language/settings/backends/azurerm#oidc_request_token + ARM_USE_OIDC: "true" # Env var concatenation is currently not supported at Workflow or Job scope. See workaround below: # https://github.community/t5/GitHub-Actions/How-can-we-concatenate-multiple-env-vars-at-workflow-and-job/td-p/48489 @@ -75,11 +78,11 @@ jobs: build-and-deploy: # always pin versions # view installed software: https://docs.github.com/en/free-pro-team@latest/actions/reference/specifications-for-github-hosted-runners#supported-software - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 # https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idenvironment environment: - name: dev_environment + name: dev url: "https://argocd.${{ env.ROOT_DOMAIN_NAME }}" # only run if owner triggered action @@ -88,7 +91,7 @@ jobs: steps: # Checkout # https://github.com/marketplace/actions/checkout - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 # specify different branch # NOT required as I've changed the default branch to develop # with: @@ -105,13 +108,19 @@ jobs: echo "DNS_DOMAIN_NAME=nexus.${{ env.ROOT_DOMAIN_NAME }}" >> $GITHUB_ENV echo 
"DOCKER_FQDN=docker.${{ env.ROOT_DOMAIN_NAME }}" >> $GITHUB_ENV echo "TERRAFORM_STORAGE_ACCOUNT=${{ env.PREFIX }}sttfstate${{ env.LOCATION }}001" >> $GITHUB_ENV + echo "TERRAFORM_STORAGE_CONTAINER=terraform" >> $GITHUB_ENV echo "TERRAFORM_STORAGE_RG=${{ env.PREFIX }}-rg-tfstate-dev-001" >> $GITHUB_ENV echo "VELERO_STORAGE_ACCOUNT=${{ env.PREFIX }}stbckuksouth001" >> $GITHUB_ENV - echo "VELERO_STORAGE_RG=${{ env.PREFIX }}-rg-velero-dev-001" >> $GITHUB_ENV # Login - - name: Login to Azure - run: ./scripts/azure_login.sh + # https://github.com/Azure/login + - name: Login via OIDC to Azure Public Cloud (az cli and az powershell) + uses: azure/login@v1 + with: + client-id: ${{ secrets.ARM_CLIENT_ID }} + tenant-id: ${{ secrets.ARM_TENANT_ID }} + subscription-id: ${{ secrets.ARM_SUBSCRIPTION_ID }} + enable-AzPSSession: true # This is required when developing after the initial build, and the AKS cluster may have been stopped # Ensure AKS cluster is running, else timeouts will occur on k8s Terraform apply tasks @@ -123,8 +132,9 @@ jobs: - name: Create Storage Account for Terraform state run: ./scripts/storage_create.sh - - name: Lookup Storage Key - run: ./scripts/storage_key.sh + # TODO remove this step + # - name: Lookup Storage Key + # run: ./scripts/storage_key.sh - name: Replace tokens in Terraform config files run: pwsh -command "./scripts/Replace-Tokens.ps1" @@ -133,26 +143,47 @@ jobs: HELM_CHART_REPO_DEPLOY_PRIVATE_KEY: ${{ secrets.HELM_CHART_REPO_DEPLOY_PRIVATE_KEY }} IFTTT_WEBHOOK_KEY: ${{ secrets.IFTTT_WEBHOOK_KEY }} - - name: Create zip file of Function App - run: pwsh -command "./function_app/CreateFunctionAppZip.ps1" - # Terraform - - uses: hashicorp/setup-terraform@v1 + # https://github.com/hashicorp/setup-terraform + - uses: hashicorp/setup-terraform@v2 with: terraform_version: ${{ env.TF_VERSION }} - name: Terraform Init / Validate run: | - terraform init + echo 'Running terraform init...' 
+ terraform init \ + -backend-config="resource_group_name=$TERRAFORM_STORAGE_RG" \ + -backend-config="storage_account_name=$TERRAFORM_STORAGE_ACCOUNT" + + # validate + echo 'Running terraform validate...' terraform validate working-directory: ${{ env.TF_WORKING_DIR }} - - name: Terraform Lint - run: ./scripts/tflint.sh - env: - TF_WORKING_DIR: ${{ env.TF_WORKING_DIR }} - TFLINT_RULESET_AZURERM_VERSION: ${{ env.TFLINT_RULESET_AZURERM_VERSION }} - TFLINT_VERSION: ${{ env.TFLINT_VERSION }} + # https://github.com/aquasecurity/tfsec-action + - name: Run tfsec security scan + uses: aquasecurity/tfsec-action@v1.0.3 + with: + working_directory: ${{ env.TF_WORKING_DIR }} + soft_fail: true + + # https://github.com/bridgecrewio/checkov-action + - name: Run Checkov security scan + id: checkov + uses: bridgecrewio/checkov-action@master + with: + directory: ${{ env.TF_WORKING_DIR }} + # check: CKV_AWS_1 # optional: run only a specific check_id. can be comma separated list + # optional: skip a specific check_id. can be comma separated list + skip_check: CKV2_AZURE_1,CKV_AZURE_4,CKV_AZURE_6,CKV2_AZURE_8,CKV2_AZURE_21,CKV_AZURE_33,CKV_AZURE_35,CKV_AZURE_117,CKV_AZURE_141 + quiet: true # optional: display only failed checks + soft_fail: true # optional: do not return an error code if there are failed checks + framework: terraform # optional: run only on a specific infrastructure {cloudformation,terraform,kubernetes,all} + output_format: sarif # optional: the output format, one of: cli, json, junitxml, github_failed_only, or sarif. Default: sarif + download_external_modules: true # optional: download external terraform modules from public git repositories and terraform registry + # log_level: DEBUG # optional: set log level. 
Default WARNING + # config_file: path/this_file - name: ๐Ÿ‘€ Terraform Plan id: plan @@ -182,6 +213,7 @@ jobs: NEXUS_USER_PASSWORD: ${{ secrets.NEXUS_USER_PASSWORD }} # Docker + # https://github.com/Azure/docker-login - name: Docker repo login uses: Azure/docker-login@v1 with: @@ -197,17 +229,18 @@ jobs: run: pwsh -command "./scripts/Fix-FunctionApp.ps1" env: FUNCTION_APP_NAME: "${{ env.PREFIX }}-funcapp" - FUNCTION_APP_RG: "${{ env.PREFIX }}-rg-function-app" + FUNCTION_APP_RG: "${{ env.PREFIX }}-rg-aks-dev-001" # Pester tests - name: ๐Ÿงช Run Pester tests continue-on-error: true run: pwsh -command "./scripts/Start-Test.ps1" + # https://github.com/actions/upload-artifact - name: Archive test artifacts - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v3 with: - name: test results + name: test-results path: test/pester-test-results.xml if: always() @@ -216,13 +249,21 @@ jobs: run: pwsh -command "Unregister-PSRepository -Name nuget.org-proxy -Verbose" # Shows at the bottom of a run: https://github.com/adamrushuk/devops-lab/runs/1035347513?check_suite_focus=true - - name: Pester report - uses: zyborg/pester-tests-report@v1 - with: - test_results_path: test/pester-test-results.xml - report_name: pester_tests - report_title: Pester Tests - github_token: ${{ secrets.GITHUB_TOKEN }} + # https://github.com/zyborg/pester-tests-report + # - name: Pester report + # uses: zyborg/pester-tests-report@v1 + # with: + # test_results_path: test/pester-test-results.xml + # report_name: pester_tests + # report_title: Pester Tests + # github_token: ${{ secrets.GITHUB_TOKEN }} + + # - uses: dorny/test-reporter@v1.6.0 + # with: + # # artifact: test-results # artifact name + # name: Pester Tests Results # Name of the check run which will be created + # path: 'test/pester-test-results.xml' # Path to test results (inside artifact .zip) + # reporter: java-junit # Format of test results # Notify - name: Notify slack @@ -230,21 +271,25 @@ jobs: env: SLACK_CHANNEL_ID: ${{ 
secrets.SLACK_CHANNEL_ID }} SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - run: ./scripts/send_slack_message.sh "[aks-nexus-velero] Build complete" + run: ./scripts/send_slack_message.sh "[devops-lab] Build complete" # used for any windows-only tasks test-windows: needs: build-and-deploy # https://github.com/actions/virtual-environments - runs-on: windows-2019 + runs-on: windows-2022 + + # https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idenvironment + environment: + name: dev # only run if owner triggered action if: github.actor == github.event.repository.owner.login steps: # Checkout - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 # Init tasks - inc Env var concatenation # https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#environment-files @@ -256,8 +301,14 @@ jobs: echo "DNS_DOMAIN_NAME=nexus.${{ env.ROOT_DOMAIN_NAME }}" | Out-File -Append -Encoding utf8 -FilePath "$env:GITHUB_ENV" # Login - - name: Login to Azure - run: ./scripts/azure_login.ps1 + # https://github.com/Azure/login + - name: Login via OIDC to Azure Public Cloud (az cli and az powershell) + uses: azure/login@v1 + with: + client-id: ${{ secrets.ARM_CLIENT_ID }} + tenant-id: ${{ secrets.ARM_TENANT_ID }} + subscription-id: ${{ secrets.ARM_SUBSCRIPTION_ID }} + enable-AzPSSession: true # Chocolatey - name: Test Nexus Chocolatey proxy repo diff --git a/.github/workflows/cleanup-resource-groups.yml b/.github/workflows/cleanup-resource-groups.yml new file mode 100644 index 00000000..5c4e8cb1 --- /dev/null +++ b/.github/workflows/cleanup-resource-groups.yml @@ -0,0 +1,84 @@ +# cleanup - (useful after failed build/destroy workflows) +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json + +# https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions +name: cleanup + +# name of GitHub event that triggers workflow +# 
https://help.github.com/en/actions/reference/events-that-trigger-workflows#watch-event-watch +on: + # trigger via webhook + # https://github.com/adamrushuk/devops-lab/blob/master/TriggerCustomAction.ps1#L28 + repository_dispatch: + types: [cleanup] + + # enable manual workflow + # https://docs.github.com/en/actions/configuring-and-managing-workflows/configuring-a-workflow#manually-running-a-workflow + workflow_dispatch: + inputs: {} + +# permissions for oidc login +# https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect +permissions: + id-token: write # This is required for requesting the JWT + contents: read # This is required for actions/checkout + +# global environment variables +# https://help.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables +env: + # prefix: used for some globally unique name requirements + PREFIX: arshz + # azure creds (used with OIDC auth) + ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }} + ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }} + ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }} + +jobs: + cleanup: + # always pin versions + # view installed software: https://help.github.com/en/actions/reference/software-installed-on-github-hosted-runners + runs-on: ubuntu-22.04 + + # https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idenvironment + environment: + name: dev + + # only run if owner triggered action + if: github.actor == github.event.repository.owner.login + + steps: + # Checkout + # Reference the major version of a release + # https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#example-using-versioned-actions + - uses: actions/checkout@v3 + + # Init tasks + - name: Init + run: | + chmod -R +x ./scripts/ + + # Login + # https://github.com/Azure/login + - name: Login via OIDC to Azure Public Cloud (az cli and az powershell) + uses: 
azure/login@v1 + with: + client-id: ${{ secrets.ARM_CLIENT_ID }} + tenant-id: ${{ secrets.ARM_TENANT_ID }} + subscription-id: ${{ secrets.ARM_SUBSCRIPTION_ID }} + enable-AzPSSession: true + + # Cleanup + - name: Delete all resource groups + uses: azure/powershell@v1 + with: + azPSVersion: "latest" + inlineScript: | + ./scripts/cleanup.ps1 -ResourceGroupPrefix "${{ env.PREFIX }}" + + # Notify + - name: Notify slack + continue-on-error: true + env: + SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }} + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + run: ./scripts/send_slack_message.sh "[devops-lab] Cleanup complete" diff --git a/.github/workflows/destroy.yml b/.github/workflows/destroy.yml index 638819bc..aa5e2108 100644 --- a/.github/workflows/destroy.yml +++ b/.github/workflows/destroy.yml @@ -17,6 +17,12 @@ on: workflow_dispatch: inputs: {} +# permissions for oidc login +# https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect +permissions: + id-token: write # This is required for requesting the JWT + contents: read # This is required for actions/checkout + # global environment variables # https://help.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables env: @@ -26,9 +32,8 @@ env: # debug CI_DEBUG: true - # azure creds + # azure creds (used with OIDC auth) ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }} - ARM_CLIENT_SECRET: ${{ secrets.ARM_CLIENT_SECRET }} ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }} ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }} @@ -40,7 +45,7 @@ env: ENABLE_TLS_INGRESS: true FORCE_TEST_FAIL: false K8S_TLS_SECRET_NAME: tls-secret - KEY_VAULT_NAME: kv-rush-iz6y + KEY_VAULT_NAME: kv-rush-aqy2 KEY_VAULT_CERT_NAME: wildcard-thehypepipe-co-uk KEY_VAULT_RESOURCE_GROUP_NAME: rg-keyvault-acmebot # NOTE: "eastus" is cheaper than "uksouth" @@ -53,95 +58,111 @@ env: TF_IN_AUTOMATION: "true" TF_INPUT: "false" TF_LOG_PATH: terraform.log - TF_LOG: 
TRACE + TF_LOG: "ERROR" # https://developer.hashicorp.com/terraform/internals/debugging # https://github.com/hashicorp/terraform/releases - TF_VERSION: "1.0.10" + TF_VERSION: "1.3.7" TF_WORKING_DIR: terraform + # azurerm provider oidc + # https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/guides/service_principal_oidc + # https://developer.hashicorp.com/terraform/language/settings/backends/azurerm#oidc_request_token + ARM_USE_OIDC: "true" # Env var concatenation is currently not supported at Workflow or Job scope. See workaround below: # https://github.community/t5/GitHub-Actions/How-can-we-concatenate-multiple-env-vars-at-workflow-and-job/td-p/48489 jobs: destroy: - # always pin versions # view installed software: https://help.github.com/en/actions/reference/software-installed-on-github-hosted-runners - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 + + # https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idenvironment + environment: + name: dev # only run if owner triggered action if: github.actor == github.event.repository.owner.login steps: - # Checkout - # Reference the major version of a release - # https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#example-using-versioned-actions - - uses: actions/checkout@v2 + # Checkout + # Reference the major version of a release + # https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#example-using-versioned-actions + - uses: actions/checkout@v3 # specify different branch # NOT required as I've changed the default branch to develop # with: # ref: develop - # Env var concatenation - # https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#environment-files - - name: Concatenate env vars (Workaround) - run: | - chmod -R +x ./scripts/ - echo "AKS_CLUSTER_NAME=${{ env.PREFIX }}-aks-001" >> $GITHUB_ENV - echo "AKS_RG_NAME=${{ 
env.PREFIX }}-rg-aks-dev-001" >> $GITHUB_ENV - echo "ARGOCD_FQDN=argocd.${{ env.ROOT_DOMAIN_NAME }}" >> $GITHUB_ENV - echo "DNS_DOMAIN_NAME=nexus.${{ env.ROOT_DOMAIN_NAME }}" >> $GITHUB_ENV - echo "TERRAFORM_STORAGE_ACCOUNT=${{ env.PREFIX }}sttfstate${{ env.LOCATION }}001" >> $GITHUB_ENV - echo "TERRAFORM_STORAGE_RG=${{ env.PREFIX }}-rg-tfstate-dev-001" >> $GITHUB_ENV - echo "VELERO_STORAGE_ACCOUNT=${{ env.PREFIX }}stbckuksouth001" >> $GITHUB_ENV - echo "VELERO_STORAGE_RG=${{ env.PREFIX }}-rg-velero-dev-001" >> $GITHUB_ENV - - # Login - - name: Login to Azure - run: ./scripts/azure_login.sh - - # Ensure AKS cluster is running, else timeouts will occur on k8s Terraform resource destroy tasks - - name: Start AKS Cluster - continue-on-error: true - run: ./scripts/start_aks_cluster.sh - - # Prereqs - - name: Lookup Storage Key - run: ./scripts/storage_key.sh - - - name: Replace tokens in Terraform config files - run: pwsh -command "./scripts/Replace-Tokens.ps1" - env: - IFTTT_WEBHOOK_KEY: ${{ secrets.IFTTT_WEBHOOK_KEY }} - - - name: Create zip file of Function App - run: pwsh -command "./function_app/CreateFunctionAppZip.ps1" - - # Terraform - - uses: hashicorp/setup-terraform@v1 - with: - terraform_version: ${{ env.TF_VERSION }} - - - name: ๐Ÿ’€ Terraform destroy - run: | - terraform init - terraform destroy -no-color -auto-approve - working-directory: ${{ env.TF_WORKING_DIR }} - - - name: Terraform logs - uses: actions/upload-artifact@v2 - with: - name: Terraform logs - path: ${{ env.TF_WORKING_DIR }}/${{ env.TF_LOG_PATH }} - if: always() - - # Cleanup - - name: Delete Storage - run: ./scripts/storage_delete.sh - - # Notify - - name: Notify slack - continue-on-error: true - env: - SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }} - SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - run: ./scripts/send_slack_message.sh "[aks-nexus-velero] Destroy complete" + # Env var concatenation + # 
https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#environment-files + - name: Concatenate env vars (Workaround) + run: | + chmod -R +x ./scripts/ + echo "AKS_CLUSTER_NAME=${{ env.PREFIX }}-aks-001" >> $GITHUB_ENV + echo "AKS_RG_NAME=${{ env.PREFIX }}-rg-aks-dev-001" >> $GITHUB_ENV + echo "ARGOCD_FQDN=argocd.${{ env.ROOT_DOMAIN_NAME }}" >> $GITHUB_ENV + echo "DNS_DOMAIN_NAME=nexus.${{ env.ROOT_DOMAIN_NAME }}" >> $GITHUB_ENV + echo "TERRAFORM_STORAGE_ACCOUNT=${{ env.PREFIX }}sttfstate${{ env.LOCATION }}001" >> $GITHUB_ENV + echo "TERRAFORM_STORAGE_CONTAINER=terraform" >> $GITHUB_ENV + echo "TERRAFORM_STORAGE_RG=${{ env.PREFIX }}-rg-tfstate-dev-001" >> $GITHUB_ENV + echo "VELERO_STORAGE_ACCOUNT=${{ env.PREFIX }}stbckuksouth001" >> $GITHUB_ENV + + # Login + # https://github.com/Azure/login + - name: Login via OIDC to Azure Public Cloud (az cli and az powershell) + uses: azure/login@v1 + with: + client-id: ${{ secrets.ARM_CLIENT_ID }} + tenant-id: ${{ secrets.ARM_TENANT_ID }} + subscription-id: ${{ secrets.ARM_SUBSCRIPTION_ID }} + enable-AzPSSession: true + + # Ensure AKS cluster is running, else timeouts will occur on k8s Terraform resource destroy tasks + - name: Start AKS Cluster + continue-on-error: true + run: ./scripts/start_aks_cluster.sh + + # Prereqs + # TODO remove this step + # - name: Lookup Storage Key + # run: ./scripts/storage_key.sh + + - name: Replace tokens in Terraform config files + run: pwsh -command "./scripts/Replace-Tokens.ps1" + env: + IFTTT_WEBHOOK_KEY: ${{ secrets.IFTTT_WEBHOOK_KEY }} + + # Terraform + - uses: hashicorp/setup-terraform@v2 + with: + terraform_version: ${{ env.TF_VERSION }} + + - name: ๐Ÿ’€ Terraform destroy + run: | + echo 'Running terraform init...' + terraform init \ + -backend-config="resource_group_name=$TERRAFORM_STORAGE_RG" \ + -backend-config="storage_account_name=$TERRAFORM_STORAGE_ACCOUNT" + + echo 'Running terraform destroy...' 
+ terraform destroy -no-color -auto-approve + working-directory: ${{ env.TF_WORKING_DIR }} + + - name: Terraform logs + uses: actions/upload-artifact@v2 + with: + name: Terraform logs + path: ${{ env.TF_WORKING_DIR }}/${{ env.TF_LOG_PATH }} + if: always() + + # Cleanup + - name: Delete Storage + run: ./scripts/storage_delete.sh + + # Notify + - name: Notify slack + continue-on-error: true + env: + SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }} + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + run: ./scripts/send_slack_message.sh "[devops-lab] Destroy complete" diff --git a/.github/workflows/start_aks_cluster.yml b/.github/workflows/start_aks_cluster.yml index 20f17a46..aad037d4 100644 --- a/.github/workflows/start_aks_cluster.yml +++ b/.github/workflows/start_aks_cluster.yml @@ -18,26 +18,29 @@ env: # debug CI_DEBUG: true - # azure creds + # azure creds (used with OIDC auth) ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }} - ARM_CLIENT_SECRET: ${{ secrets.ARM_CLIENT_SECRET }} ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }} ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }} jobs: start_aks_cluster: - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 + + # https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idenvironment + environment: + name: dev # only run if owner triggered action if: github.actor == github.event.repository.owner.login steps: # Checkout - - uses: actions/checkout@v2 - # specify different branch - # NOT required as I've changed the default branch to develop - # with: - # ref: develop + - uses: actions/checkout@v3 + # specify different branch + # NOT required as I've changed the default branch to develop + # with: + # ref: develop # Init tasks - inc env var concatenation - name: Init tasks - inc Env var concatenation (Workaround) @@ -47,8 +50,14 @@ jobs: echo "AKS_CLUSTER_NAME=${{ env.PREFIX }}-aks-001" >> $GITHUB_ENV # Login - - name: Login to Azure - run: ./scripts/azure_login.sh + # https://github.com/Azure/login 
+ - name: Login via OIDC to Azure Public Cloud (az cli and az powershell) + uses: azure/login@v1 + with: + client-id: ${{ secrets.ARM_CLIENT_ID }} + tenant-id: ${{ secrets.ARM_TENANT_ID }} + subscription-id: ${{ secrets.ARM_SUBSCRIPTION_ID }} + enable-AzPSSession: true # Start # Prereqs: https://docs.microsoft.com/en-us/azure/aks/start-stop-cluster @@ -61,4 +70,4 @@ jobs: env: SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }} SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - run: ./scripts/send_slack_message.sh "[aks-nexus-velero] Start AKS Cluster complete" + run: ./scripts/send_slack_message.sh "[devops-lab] Start AKS Cluster complete" diff --git a/.github/workflows/stop_aks_cluster.yml b/.github/workflows/stop_aks_cluster.yml index 152263c7..dacf8d1d 100644 --- a/.github/workflows/stop_aks_cluster.yml +++ b/.github/workflows/stop_aks_cluster.yml @@ -18,26 +18,29 @@ env: # debug CI_DEBUG: true - # azure creds + # azure creds (used with OIDC auth) ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }} - ARM_CLIENT_SECRET: ${{ secrets.ARM_CLIENT_SECRET }} ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }} ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }} jobs: stop_aks_cluster: - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 + + # https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idenvironment + environment: + name: dev # only run if owner triggered action if: github.actor == github.event.repository.owner.login steps: # Checkout - - uses: actions/checkout@v2 - # specify different branch - # NOT required as I've changed the default branch to develop - # with: - # ref: develop + - uses: actions/checkout@v3 + # specify different branch + # NOT required as I've changed the default branch to develop + # with: + # ref: develop # Init tasks - inc env var concatenation - name: Init tasks - inc Env var concatenation (Workaround) @@ -47,8 +50,14 @@ jobs: echo "AKS_CLUSTER_NAME=${{ env.PREFIX }}-aks-001" >> $GITHUB_ENV # Login - - name: Login to 
Azure - run: ./scripts/azure_login.sh + # https://github.com/Azure/login + - name: Login via OIDC to Azure Public Cloud (az cli and az powershell) + uses: azure/login@v1 + with: + client-id: ${{ secrets.ARM_CLIENT_ID }} + tenant-id: ${{ secrets.ARM_TENANT_ID }} + subscription-id: ${{ secrets.ARM_SUBSCRIPTION_ID }} + enable-AzPSSession: true # Stop # Prereqs: https://docs.microsoft.com/en-us/azure/aks/start-stop-cluster @@ -61,4 +70,4 @@ jobs: env: SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }} SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - run: ./scripts/send_slack_message.sh "[aks-nexus-velero] Stop AKS Cluster complete" + run: ./scripts/send_slack_message.sh "[devops-lab] Stop AKS Cluster complete" diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 50fbd014..c5b95836 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -36,7 +36,7 @@ jobs: # always pin versions # # view installed software: https://help.github.com/en/actions/reference/software-installed-on-github-hosted-runners - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 env: MY_JOB_VAR: job @@ -46,7 +46,7 @@ jobs: steps: # Checkout - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 # Show event info - name: Show triggered event data @@ -87,4 +87,4 @@ jobs: env: SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }} SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - run: ./scripts/send_slack_message.sh "[aks-nexus-velero] Test notification" + run: ./scripts/send_slack_message.sh "[devops-lab] Test notification" diff --git a/.github/workflows/tfsec.yml b/.github/workflows/tfsec.yml new file mode 100644 index 00000000..29010f2e --- /dev/null +++ b/.github/workflows/tfsec.yml @@ -0,0 +1,38 @@ +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. 
+ +name: tfsec + +on: + # push: + # branches: [ "develop" ] + # pull_request: + # branches: [ "develop" ] + schedule: + - cron: '44 10 * * 5' + +jobs: + tfsec: + name: Run tfsec sarif report + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + steps: + - name: Clone repo + uses: actions/checkout@v3 + + - name: Run tfsec + uses: aquasecurity/tfsec-sarif-action@9a83b5c3524f825c020e356335855741fd02745f + with: + sarif_file: tfsec.sarif + + - name: Upload SARIF file + uses: github/codeql-action/upload-sarif@v2 + with: + # Path to SARIF file relative to the root of the repository + sarif_file: tfsec.sarif diff --git a/.gitignore b/.gitignore index b07bc59b..5e4daf5a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,8 @@ # Misc .terraform +.terraform.* terraform.tfstate* +tftest localonly credentials* /temp diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8bddb065..15961d24 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,31 +1,27 @@ +# See http://pre-commit.com for more information +# See http://pre-commit.com/hooks.html for more hooks +# +# Install git hooks +# pre-commit install +# Update this file: +# pre-commit autoupdate +# Apply to all files without committing: +# pre-commit run --all-files repos: + - repo: https://github.com/antonbabenko/pre-commit-terraform + rev: v1.76.0 + hooks: + - id: terraform_docs + - id: terraform_fmt + # - id: terraform_validate # this downloads ALL modules locally and takes too long - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 # "" gets replaced with latest repo release versions by running "pre-commit autoupdate" + rev: v4.3.0 hooks: - - id: check-merge-conflict - # - id: trailing-whitespace # find way to ignore markdown files (.md) - # - id: check-yaml - id: check-added-large-files + - id: check-json + - id: check-merge-conflict + - id: check-yaml - id: detect-private-key - - - repo: 
https://github.com/antonbabenko/pre-commit-terraform.git - rev: v1.45.0 # Get the latest from: https://github.com/antonbabenko/pre-commit-terraform/releases - hooks: - - id: terraform_docs - - id: terraform_fmt - - id: terraform_validate - - id: terraform_tflint - - id: terraform_tfsec - - # ! WIP: unhandled errors running tflint with pre-commit - # - repo: https://github.com/gruntwork-io/pre-commit - # rev: v0.1.12 # Get the latest from: https://github.com/gruntwork-io/pre-commit/releases - # hooks: - # - id: tflint - # args: - # - "terraform" - # # - "--module" - # # - "--deep" - # - "--config=.tflint.hcl" - # # # - id: terraform-validate - # # # - id: terraform-fmt + - id: end-of-file-fixer + - id: fix-byte-order-marker + - id: trailing-whitespace diff --git a/README.md b/README.md index c85ac340..7a35115c 100644 --- a/README.md +++ b/README.md @@ -18,8 +18,9 @@ being used now. - [Configure Key Vault / LetsEncrypt TLS Certificate](#configure-key-vault--letsencrypt-tls-certificate) - [Configure Azure Authentication](#configure-azure-authentication) - [Create Secrets](#create-secrets) - - [Running the Build workflow](#running-the-build-workflow) - - [Running the Destroy workflow](#running-the-destroy-workflow) + - [Update the Workflow Environment Variables](#update-the-workflow-environment-variables) + - [Running the Build Workflow](#running-the-build-workflow) + - [Running the Destroy Workflow](#running-the-destroy-workflow) ## Getting Started @@ -38,8 +39,8 @@ dynamically update DNS records. #### Configure Key Vault / LetsEncrypt TLS Certificate -Use the [keyvault-acmebot Getting Started guide](https://github.com/shibayan/keyvault-acmebot#getting-started) to -deploy AcmeBot and configure a wildcard certificate for your domain. +Use the [keyvault-acmebot Getting Started guide](https://github.com/shibayan/keyvault-acmebot/wiki/Getting-Started) to +deploy AcmeBot and configure a wildcard certificate for your domain (eg: `*.domain.com`). 
### Configure Azure Authentication @@ -52,16 +53,21 @@ configured for Azure. ### Create Secrets +TODO: Update this for OIDC auth (federated credential): https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/guides/service_principal_oidc#configuring-the-service-principal-in-terraform + Once Azure authentication has been configured, the Service Principle credential values can be [passed as environment variables](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/guides/service_principal_client_secret#configuring-the-service-principal-in-terraform). [Use these instructions](https://docs.github.com/en/free-pro-team@latest/actions/reference/encrypted-secrets#creating-encrypted-secrets-for-a-repository) to create the following secrets for your repository: - `ARM_CLIENT_ID` -- `ARM_CLIENT_SECRET` - `ARM_SUBSCRIPTION_ID` - `ARM_TENANT_ID` -### Running the Build workflow +### Update the Workflow Environment Variables + +TODO - update key vault and dns env vars. + +### Running the Build Workflow Now that Azure authentication has been configured with corresponding secrets, the build workflow is ready to be run: @@ -70,7 +76,7 @@ Now that Azure authentication has been configured with corresponding secrets, th 1. Select the desired branch. 1. Click the `Run workflow` button. 
-### Running the Destroy workflow +### Running the Destroy Workflow There will be ongoing costs if the environment is left running, so to avoid unexpected bills the destroy workflow should be run once testing has been completed: diff --git a/ansible/roles/chocolatey_repo/defaults/main.yml b/ansible/roles/chocolatey_repo/defaults/main.yml index efaa8164..4011a08c 100644 --- a/ansible/roles/chocolatey_repo/defaults/main.yml +++ b/ansible/roles/chocolatey_repo/defaults/main.yml @@ -26,10 +26,11 @@ chocolatey_proxy_repo_request_body: timeout: enableCircularRedirects: false enableCookies: false + useTrustStore: false authentication: routingRuleName: nugetProxy: queryCacheItemMaxAge: 3600 + nugetVersion: V2 format: nuget type: proxy - diff --git a/ansible/roles/helm_repo/tasks/main.yml b/ansible/roles/helm_repo/tasks/main.yml index 7252a4ce..d01e30b4 100644 --- a/ansible/roles/helm_repo/tasks/main.yml +++ b/ansible/roles/helm_repo/tasks/main.yml @@ -24,6 +24,5 @@ - name: Upload Helm Charts to Nexus shell: cmd: "curl --insecure -v -u {{ api_user }}:{{ admin_password }} {{ api_base_uri }}/repository/{{ helm_repo_request_body.name }}/ --upload-file {{ item.name }}-{{ item.version }}.tgz" - warn: false with_items: - "{{ helm_charts }}" diff --git a/ansible/roles/init/tasks/main.yml b/ansible/roles/init/tasks/main.yml index bcebd5b6..ad8562af 100644 --- a/ansible/roles/init/tasks/main.yml +++ b/ansible/roles/init/tasks/main.yml @@ -4,7 +4,8 @@ shell: | podName=$(kubectl get pod --namespace {{ nexus_namespace }} -l app.kubernetes.io/name=sonatype-nexus -o jsonpath="{.items[0].metadata.name}") - kubectl cp --namespace {{ nexus_namespace }} {{role_path}}/files/get_admin_password.sh "$podName":/tmp/get_admin_password.sh + # kubectl cp --namespace {{ nexus_namespace }} {{role_path}}/files/get_admin_password.sh "$podName":/tmp/get_admin_password.sh + cat {{role_path}}/files/get_admin_password.sh | kubectl exec -i -n {{ nexus_namespace }} "$podName" "--" sh -c "cat > 
/tmp/get_admin_password.sh" kubectl exec --namespace {{ nexus_namespace }} "$podName" -- sh -c "chmod +x /tmp/get_admin_password.sh; timeout {{ admin_password_script_timeout_mins }}m /tmp/get_admin_password.sh" diff --git a/ansible/roles/nuget_repo/defaults/main.yml b/ansible/roles/nuget_repo/defaults/main.yml index 3d9e8f3b..483dc9b6 100644 --- a/ansible/roles/nuget_repo/defaults/main.yml +++ b/ansible/roles/nuget_repo/defaults/main.yml @@ -25,17 +25,18 @@ nuget_proxy_repo_request_body: timeToLive: 1440 httpClient: blocked: false - autoBlock: true + autoBlock: false connection: retries: userAgentSuffix: timeout: enableCircularRedirects: false enableCookies: false + useTrustStore: false authentication: routingRuleName: nugetProxy: queryCacheItemMaxAge: 3600 + nugetVersion: V2 format: nuget type: proxy - diff --git a/ansible/site.yml b/ansible/site.yml index 18540363..3e33862f 100644 --- a/ansible/site.yml +++ b/ansible/site.yml @@ -1,46 +1,46 @@ -# configure Nexus via rest api -# -# cd ansible -# ansible-playbook site.yml --extra-vars "api_base_uri=https://nexus.thehypepipe.co.uk" -# -# set env vars -# export NEW_ADMIN_PASSWORD= -# export AUTOGENERATED_ADMIN_PASSWORD= ---- -- name: configure nexus - hosts: localhost - connection: local - gather_facts: no - # debugger: always - - # DEFAULTS - # reduce calling the same module repeatedly with the same arguments - # https://docs.ansible.com/ansible/latest/user_guide/playbooks_module_defaults.html - module_defaults: - uri: - # automatically formats body as json, and sets the Content-Type header - body_format: json - user: "{{ api_user }}" - password: "{{ admin_password }}" - force_basic_auth: yes - timeout: 10 - validate_certs: false - - tasks: - # ROLES - - import_role: - name: init - - import_role: - name: user - - import_role: - name: nuget_repo - - import_role: - name: chocolatey_repo - - import_role: - name: docker_repo - - import_role: - name: helm_repo -# - import_role: -# name: pypi_repo - - import_role: - name: 
raw_repo +# configure Nexus via rest api +# +# cd ansible +# ansible-playbook site.yml --extra-vars "api_base_uri=https://nexus.thehypepipe.co.uk" +# +# set env vars +# export NEW_ADMIN_PASSWORD= +# export AUTOGENERATED_ADMIN_PASSWORD= +--- +- name: configure nexus + hosts: localhost + connection: local + gather_facts: no + # debugger: always + + # DEFAULTS + # reduce calling the same module repeatedly with the same arguments + # https://docs.ansible.com/ansible/latest/user_guide/playbooks_module_defaults.html + module_defaults: + uri: + # automatically formats body as json, and sets the Content-Type header + body_format: json + user: "{{ api_user }}" + password: "{{ admin_password }}" + force_basic_auth: yes + timeout: 10 + validate_certs: false + + tasks: + # ROLES + - import_role: + name: init + - import_role: + name: user + - import_role: + name: nuget_repo + - import_role: + name: chocolatey_repo + - import_role: + name: docker_repo + - import_role: + name: helm_repo + # - import_role: + # name: pypi_repo + - import_role: + name: raw_repo diff --git a/function_app/host.json b/function_app/host.json index c1437cbc..6ae17b86 100644 --- a/function_app/host.json +++ b/function_app/host.json @@ -10,7 +10,7 @@ }, "extensionBundle": { "id": "Microsoft.Azure.Functions.ExtensionBundle", - "version": "[1.*, 2.0.0)" + "version": "[2.*, 3.0.0)" }, "managedDependency": { "enabled": true diff --git a/nexus/repositories/nuget/PSvCloud/Public/Edge/Get-CIEdgeSecurityCheck.ps1 b/nexus/repositories/nuget/PSvCloud/Public/Edge/Get-CIEdgeSecurityCheck.ps1 index 6c256bd9..77414341 100644 --- a/nexus/repositories/nuget/PSvCloud/Public/Edge/Get-CIEdgeSecurityCheck.ps1 +++ b/nexus/repositories/nuget/PSvCloud/Public/Edge/Get-CIEdgeSecurityCheck.ps1 @@ -154,4 +154,4 @@ function Get-CIEdgeSecurityCheck { } } # End process -} # End function \ No newline at end of file +} # End function diff --git a/scripts/Delete-ResourceGroups.ps1 b/scripts/Delete-ResourceGroups.ps1 new file mode 100644 
index 00000000..53ca16dc --- /dev/null +++ b/scripts/Delete-ResourceGroups.ps1 @@ -0,0 +1,111 @@ +<# +.SYNOPSIS + Deletes Azure Resource Groups with a given prefix +.DESCRIPTION + Deletes Azure Resource Groups with a given prefix, with confirmation prompt and WhatIf functionality +.PARAMETER Prefixes + An array of prefix strings that matches the start of the Resource Group name + "abc99", "abc12" would match resource group called "abc99-rg-blahblah" and "abc12-rg-blahblah" + Wildcards are supported, so you could use "abc*" instead of "abc99" and "abc12". +.PARAMETER MaxLimit + Aborts script if too many Resource Groups are found. + This is a safety check. +.PARAMETER WhatIf + Does a dry-run and shows what Resource Groups would be deleted. +.EXAMPLE + ./Delete-ResourceGroups.ps1 -Prefixes abc99. + + Deletes all Resource Groups starting with "abc99", eg: + "abc99-rg-one" + "abc99-rg-two" +.EXAMPLE + ./Delete-ResourceGroups.ps1 -Prefixes abc99 -WhatIf + + Shows what Resource Groups would be deleted +.NOTES + Author: Adam Rush + GitHub: adamrushuk + Twitter: @adamrushuk +#> +[CmdletBinding()] +param ( + [Parameter(Mandatory)] + [ValidateNotNull()] + [string[]] + $Prefixes, + + [int] + $MaxLimit = 2, + + [switch] + $WhatIf +) + +Write-Output "Searching for Resource groups starting with [$($Prefixes -join ', ')]" + +# init +$resourceGroupsToDelete = $null +$jobs = $null + +foreach ($Prefix in $Prefixes) { + + $resourceGroups = $null + $resourceGroups = @(Get-AzResourceGroup -Name "$Prefix*") + Write-Host "`nResource groups found starting with [$Prefix]: [$($resourceGroups.Count)]" -ForegroundColor Yellow + + # abort if we find no resource groups + if ($resourceGroups.Count -eq 0) { + Write-Host "Continuing...`n" -ForegroundColor Green + continue + } + + # safety check + if ($resourceGroups.Count -gt $MaxLimit) { + Write-Host "ABORTING, MaxLimit was hit. Over [$MaxLimit] resource groups were found." 
-ForegroundColor Red + return + } + + + # show resource groups + $resourceGroups | Select-Object -ExpandProperty "ResourceGroupName" + Write-Output "" + + # confirm deletion + $confirmation = $null + while($confirmation -ne "y") { + if ($confirmation -eq 'n') { break } + + $confirmation = Read-Host "Are you sure you want to select these [$($resourceGroups.Count)] Resource Groups for deletion? [y/n]" + } + + # queue + if ($confirmation -eq "y") { + Write-Output "Queuing [$($resourceGroups.Count)] Resource Groups..." + $resourceGroupsToDelete += $resourceGroups + } else { + Write-Host "Skipping...`n" -ForegroundColor Yellow + } +} + +# delete +if ($resourceGroupsToDelete.Count -gt 0) { + Write-Output "Deleting [$($resourceGroupsToDelete.Count)] Resource Groups..." + if ($WhatIf.IsPresent) { + $resourceGroupsToDelete | Remove-AzResourceGroup -Force -WhatIf + } else { + $timer = [Diagnostics.Stopwatch]::StartNew() + $jobs += $resourceGroupsToDelete | Remove-AzResourceGroup -Force -AsJob + } +} + +# wait for jobs to complete +if ($null -ne $jobs) { + $jobs + + Write-Output "`nWaiting for [$($jobs.Count)] jobs to finish..." 
+ $jobs | Wait-Job + $jobs | Receive-Job -Keep + + $timer.Stop() + Write-Output "Deletion jobs completed in: [$($timer.Elapsed.Minutes)m$($timer.Elapsed.Seconds)s]" +} diff --git a/scripts/Fix-FunctionApp.ps1 b/scripts/Fix-FunctionApp.ps1 index 2ce126ff..54c4363d 100644 --- a/scripts/Fix-FunctionApp.ps1 +++ b/scripts/Fix-FunctionApp.ps1 @@ -1,19 +1,19 @@ # source: https://github.com/terraform-providers/terraform-provider-azurerm/issues/8867#issuecomment-849842849 -# Fixes PowerShell function app stack version to 7 +# Fixes PowerShell function app stack version to 7, and restarts -# Change function app to PS 7 -Write-Host 'Change function app to PS 7' $function = az functionapp show --name $env:FUNCTION_APP_NAME --resource-group $env:FUNCTION_APP_RG | ConvertFrom-Json +# TODO: I dont think this is required anymore as I use application_stack > powershell_core_version if ($function.siteConfig.powerShellVersion -ne "~7") { - Write-Host "Updating powershell version to ~7..." - az functionapp update --name $env:FUNCTION_APP_NAME --resource-group $env:FUNCTION_APP_RG --set "siteConfig.powerShellVersion=~7" - - # Restart Function App - # az functionapp restart --name $env:FUNCTION_APP_NAME --resource-group $env:FUNCTION_APP_RG + Write-Host "[NoOp] Updating powershell version to ~7..." + # az functionapp update --name $env:FUNCTION_APP_NAME --resource-group $env:FUNCTION_APP_RG --set "siteConfig.powerShellVersion=~7" } else { Write-Host "Powershell version already set to to ~7" } -Write-Host '' +# Restart Function App +Write-Host "Restarting function app [$($env:FUNCTION_APP_NAME)]..." +az functionapp restart --name $env:FUNCTION_APP_NAME --resource-group $env:FUNCTION_APP_RG + +Write-Host 'FINISHED.' 
diff --git a/scripts/Start-Test.ps1 b/scripts/Start-Test.ps1 index 6b7db6ab..78bd8951 100755 --- a/scripts/Start-Test.ps1 +++ b/scripts/Start-Test.ps1 @@ -8,9 +8,6 @@ $ErrorActionPreference = "Stop" #endregion Write-Verbose "Started in folder: [$(Get-Location)]" -Write-Verbose "Changing directory to test folder..." -Set-Location "test" - Write-Verbose "STARTED: pwsh test task in current folder: [$(Get-Location)]" # Install Pester @@ -18,7 +15,7 @@ $taskMessage = "Installing Pester " Write-Verbose "STARTED: $taskMessage..." try { Set-PSRepository -Name "PSGallery" -InstallationPolicy "Trusted" - Install-Module -Name "Pester" -Scope "CurrentUser" -Repository "PSGallery" -MinimumVersion 5.1.0 -Verbose + Install-Module -Name "Pester" -Scope "CurrentUser" -Repository "PSGallery" -MinimumVersion 5.3.0 -Verbose Write-Verbose "FINISHED: $taskMessage." } @@ -31,9 +28,9 @@ catch { $taskMessage = "Running Pester tests" Write-Verbose "STARTED: $taskMessage..." try { - $testScripts = Get-ChildItem -Path "*.tests.ps1" - Invoke-Pester -Script $testScripts -PassThru -OutputFormat "NUnitXml" -OutputFile "pester-test-results.xml" -Verbose -ErrorAction "Stop" - + # $testScripts = Get-ChildItem -Path "*.Tests.ps1" + # Invoke-Pester -Script $testScripts -PassThru -OutputFormat "JUnitXml" -OutputFile "pester-test-results.xml" -Verbose -ErrorAction "Stop" + Invoke-Pester -Path './tests' -CI -Verbose Write-Verbose "FINISHED: $taskMessage." 
} catch { diff --git a/scripts/aks-csi-disk-expand.sh b/scripts/aks-csi-disk-expand.sh new file mode 100644 index 00000000..91381caa --- /dev/null +++ b/scripts/aks-csi-disk-expand.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash + +kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/pvc-azuredisk-csi.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/nginx-pod-azuredisk.yaml + +# check disk size in pod +kubectl exec -it nginx-azuredisk -- df -h /mnt/azuredisk + + Filesystem Size Used Available Use% Mounted on + /dev/sdd 9.7G 36.0K 9.7G 0% /mnt/azuredisk + +# ! this step ONLY required when using AKS v1.20 or below +# [optional] delete pod to unattach disk +kubectl delete -f https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/nginx-pod-azuredisk.yaml + + + +# Waits for an AKS disk to report "Unattached" + +# vars +SUBSCRIPTION_NAME="" +AKS_CLUSTER_RESOURCEGROUP_NAME="" +AKS_CLUSTER_NAME="" +PVC_NAME="pvc-azuredisk" + +# login +az login +az account set --subscription "$SUBSCRIPTION_NAME" + +# get cluster and associated "node resource group" (where resources live) +DISK_RESOURCEGROUP_NAME=$(az aks show --name "$AKS_CLUSTER_NAME" --resource-group "$AKS_CLUSTER_RESOURCEGROUP_NAME" --query "nodeResourceGroup" --output tsv) + +# define reusable function +get_disk_info() { + az disk list --resource-group "$DISK_RESOURCEGROUP_NAME" --query "[?tags.\"kubernetes.io-created-for-pvc-name\" == '$PVC_NAME' ].{state:diskState, diskSizeGb:diskSizeGb, name:name, pvcname:tags.\"kubernetes.io-created-for-pvc-name\"}" --output table +} + +# get disk associated with AKS PVC name +echo 'Waiting for disk to become "Unattached"...' 
+get_disk_info + +# wait for disk state to detach +START_TIME=$SECONDS + +while true; do + # get disk info + DISK_OUTPUT=$(get_disk_info) + + # check disk state + if echo "$DISK_OUTPUT" | grep Attached; then + sleep 10 + elif echo "$DISK_OUTPUT" | grep Unattached; then + echo "Disk is now Unattached." + break + fi +done + +ELAPSED_TIME=$(($SECONDS - $START_TIME)) +echo "Disk took [$(($ELAPSED_TIME / 60))m$(($ELAPSED_TIME % 60))s] to change states" + +# final disk info +get_disk_info + + + +# expand pvc +kubectl patch pvc pvc-azuredisk --type merge --patch '{"spec": {"resources": {"requests": {"storage": "15Gi"}}}}' + +# create pod again +kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/nginx-pod-azuredisk.yaml + +# check disk size in pod +kubectl exec -it nginx-azuredisk -- df -h /mnt/azuredisk diff --git a/scripts/ansible.sh b/scripts/ansible.sh index 6e1c8332..b2156588 100755 --- a/scripts/ansible.sh +++ b/scripts/ansible.sh @@ -6,6 +6,10 @@ set -euo pipefail trap "echo 'error: Script failed: see failed command above'" ERR +# Info +echo "Ansible version info..." +ansible --version + # Init # Get AKS Cluster credentials message="Merging AKS credentials" diff --git a/scripts/azure_login.ps1 b/scripts/azure_login.ps1 index c361fd03..5923ec8b 100644 --- a/scripts/azure_login.ps1 +++ b/scripts/azure_login.ps1 @@ -2,10 +2,19 @@ $taskMessage="Logging in to Azure" Write-Output "STARTED: $taskMessage..." -az login --service-principal --tenant "$env:ARM_TENANT_ID" -u "$env:ARM_CLIENT_ID" -p "$env:ARM_CLIENT_SECRET" -Write-Output "FINISHED: $taskMessage." -$taskMessage="Selecting Subscription" -Write-Output "STARTED: $taskMessage..." -az account set --subscription "$env:ARM_SUBSCRIPTION_ID" +# Write-Output "Env vars loaded for Client ID: [$($env:ARM_CLIENT_ID)]" + +# Login PowerShell and Az CLI sessions with Service Principal env vars +Write-Output "Authenticating PowerShell and Az CLI sessions using env vars..." 
+$servicePrincipleCredential = [pscredential]::new($env:ARM_CLIENT_ID, (ConvertTo-SecureString $env:ARM_CLIENT_SECRET -AsPlainText -Force)) +Connect-AzAccount -ServicePrincipal -Tenant $env:ARM_TENANT_ID -Credential $servicePrincipleCredential -Subscription $env:ARM_SUBSCRIPTION_ID -Verbose + +# Set context to specific subscription +az login --service-principal --username $env:ARM_CLIENT_ID --password $env:ARM_CLIENT_SECRET --tenant $env:ARM_TENANT_ID +az account set --subscription $env:ARM_SUBSCRIPTION_ID +az account show + +Write-Output "PowerShell and Az CLI session logins complete." + Write-Output "FINISHED: $taskMessage." diff --git a/scripts/cleanup.ps1 b/scripts/cleanup.ps1 new file mode 100644 index 00000000..cd042e6a --- /dev/null +++ b/scripts/cleanup.ps1 @@ -0,0 +1,25 @@ +# cleanup all resource groups +# useful after failed build/destroy workflows + +param( + [Parameter(Mandatory)] + [ValidateNotNull()] + [string] + $ResourceGroupPrefix +) + +$taskMessage = "Deleting all devops lab resource groups" +Write-Output "STARTED: $taskMessage..." + +Write-Output "Found these resource groups:" +$resourceGroupsToDelete = Get-AzResourceGroup -Name "$ResourceGroupPrefix*" +$resourceGroupsToDelete.ResourceGroupName + +Write-Output "Deleting 'AsJob' for async removal..." +$jobs = $resourceGroupsToDelete | Remove-AzResourceGroup -Force -AsJob + +Write-Output "Waiting for [$($jobs.Count)] jobs to finish..." +$jobs | Wait-Job +$jobs | Receive-Job -Keep + +Write-Output "FINISHED: $taskMessage." 
diff --git a/scripts/initial_lab_setup.sh b/scripts/initial_lab_setup.sh new file mode 100644 index 00000000..f9ef86f7 --- /dev/null +++ b/scripts/initial_lab_setup.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# Steps from README to complete the initial lab setup + +# Vars +DNS_RG_NAME="rg-dns" +LOCATION="eastus" +ROOT_DOMAIN_NAME="thehypepipe.co.uk" + +# Configure DNS Zone +az group create --name "$DNS_RG_NAME" --location "$LOCATION" +az network dns zone create --resource-group "$DNS_RG_NAME" --name "$ROOT_DOMAIN_NAME" diff --git a/scripts/push_docker_images.sh b/scripts/push_docker_images.sh index 34ee7ed7..f235f865 100644 --- a/scripts/push_docker_images.sh +++ b/scripts/push_docker_images.sh @@ -5,31 +5,44 @@ set -euo pipefail trap "echo 'error: Script failed: see failed command above'" ERR +# vars +# DOCKER_FQDN='docker.thehypepipe.co.uk' +DOCKER_SERVER="https://$DOCKER_FQDN" + # Check if images already exist -message="Pushing docker images" -echo -e "\nSTARTED: $message..." +main_message="Pushing docker image tasks" +echo -e "\nSTARTED: $main_message..." # List repositories -repos=$(curl -s "$DOCKER_FQDN/v2/_catalog" | jq ".repositories") +repos=$(curl -s "$DOCKER_SERVER/v2/_catalog" | jq ".repositories") if [[ "$repos" == "[]" ]]; then + message="Pulling base images..." + echo -e "\nSTARTED: $message..." docker pull busybox docker pull nginxdemos/hello + echo -e "\nFINISHED: $message." + message="Tagging images..." docker image tag busybox "$DOCKER_FQDN/busybox" docker image tag nginxdemos/hello "$DOCKER_FQDN/hello" + echo -e "\nFINISHED: $message." + message="Listing images..." docker image ls "$DOCKER_FQDN/busybox" docker image ls "$DOCKER_FQDN/hello" + echo -e "\nFINISHED: $message." + message="Pushing images..." docker push "$DOCKER_FQDN/busybox" docker push "$DOCKER_FQDN/hello" - echo -e "\nFINISHED: $message." + + echo -e "\nFINISHED: $main_message." else - echo -e "\nSKIPPING: $message...they already exist in repo." 
+ echo -e "\nSKIPPING: $main_message...they already exist in repo." # List tags - curl -s "$DOCKER_FQDN/v2/busybox/tags/list" - curl -s "$DOCKER_FQDN/v2/hello/tags/list" + curl -s "$DOCKER_SERVER/v2/busybox/tags/list" + curl -s "$DOCKER_SERVER/v2/hello/tags/list" fi diff --git a/scripts/storage_create.sh b/scripts/storage_create.sh index 8473fab1..a133bf89 100644 --- a/scripts/storage_create.sh +++ b/scripts/storage_create.sh @@ -15,17 +15,24 @@ echo "FINISHED: $taskMessage." # Storage Account taskMessage="Creating Storage Account" echo "STARTED: $taskMessage..." -az storage account create --name "$TERRAFORM_STORAGE_ACCOUNT" --resource-group "$TERRAFORM_STORAGE_RG" --location "$LOCATION" --sku "Standard_LRS" +STORAGE_ID=$(az storage account create --name "$TERRAFORM_STORAGE_ACCOUNT" \ + --resource-group "$TERRAFORM_STORAGE_RG" --location "$LOCATION" --sku "Standard_LRS" --query id --output tsv) echo "FINISHED: $taskMessage." # Storage Container taskMessage="Creating Storage Container" echo "STARTED: $taskMessage..." -az storage container create --name "terraform" --account-name "$TERRAFORM_STORAGE_ACCOUNT" +az storage container create --name "$TERRAFORM_STORAGE_CONTAINER" --account-name "$TERRAFORM_STORAGE_ACCOUNT" echo "FINISHED: $taskMessage." -# Get latest supported AKS version -taskMessage="Finding latest supported AKS version" +# Storage Container Role Assignment +taskMessage="Storage Container Role Assignment" echo "STARTED: $taskMessage..." -az aks get-versions -l "$LOCATION" --query "orchestrators[-1].orchestratorVersion" -o tsv +# define container scope +TERRAFORM_STORAGE_CONTAINER_SCOPE="$STORAGE_ID/blobServices/default/containers/$TERRAFORM_STORAGE_CONTAINER" +echo "$TERRAFORM_STORAGE_CONTAINER_SCOPE" + +# assign rbac +az role assignment create --assignee "$ARM_CLIENT_ID" --role "Storage Blob Data Contributor" \ + --scope "$TERRAFORM_STORAGE_CONTAINER_SCOPE" echo "FINISHED: $taskMessage." 
diff --git a/splunk/main.tf b/splunk/main.tf index 38681e24..a57c403f 100644 --- a/splunk/main.tf +++ b/splunk/main.tf @@ -42,8 +42,8 @@ locals { # version used for both main AKS API service, and default node pool # https://github.com/Azure/AKS/releases # az aks get-versions --location uksouth --output table - kubernetes_version = "1.20.7" - location = "uksouth" + kubernetes_version = "1.20.7" + location = "uksouth" # prefix = "ar${random_string.aks.result}" # aks dns_prefix must start with a letter prefix = "arsplunk" # aks dns_prefix must start with a letter resource_group_name = "${local.prefix}-rg-azurerm-kubernetes-cluster" diff --git a/terraform/README.md b/terraform/README.md index 7001f0cd..5f121776 100644 --- a/terraform/README.md +++ b/terraform/README.md @@ -1,9 +1,221 @@ -# Test README +# Terraform + +## usage + +```bash +# login via service principle +azh + +# init +cd ./terraform +terraform init -backend=false -input=false + +# validate +terraform validate + +# show plan and apply +terraform apply + +# show outputs +# terraform output function + +# test function +# eval curl $(terraform output --raw function_url)?Name=Adam +# eval curl $(terraform output --raw function_url)?Name=Tasha + + +# CLEANUP +terraform destroy +``` **PRE-COMMIT-TERRAFORM DOCS** content will be automatically created below: --- -*auto populated information +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3 | +| [archive](#requirement\_archive) | 2.2.0 | +| [azuread](#requirement\_azuread) | ~> 2.29.0 | +| [azurerm](#requirement\_azurerm) | ~> 3.29.1 | +| [helm](#requirement\_helm) | ~> 2.7.1 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.14.0 | +| [local](#requirement\_local) | ~> 2.2 | +| [null](#requirement\_null) | ~> 3.1 | +| [random](#requirement\_random) | ~> 3.1 | +| [template](#requirement\_template) | ~> 2.2 | +| [tls](#requirement\_tls) | ~> 3.3 | + +## Providers + +| Name | Version | +|------|---------| +| 
[archive](#provider\_archive) | 2.2.0 | +| [azuread](#provider\_azuread) | ~> 2.29.0 | +| [azurerm](#provider\_azurerm) | ~> 3.29.1 | +| [helm](#provider\_helm) | ~> 2.7.1 | +| [kubernetes](#provider\_kubernetes) | ~> 2.14.0 | +| [local](#provider\_local) | ~> 2.2 | +| [null](#provider\_null) | ~> 3.1 | +| [template](#provider\_template) | ~> 2.2 | +| [tls](#provider\_tls) | ~> 3.3 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [aks](#module\_aks) | adamrushuk/aks/azurerm | ~> 1.1.0 | + +## Resources + +| Name | Type | +|------|------| +| [azuread_application.argocd](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/resources/application) | resource | +| [azuread_application_password.argocd](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/resources/application_password) | resource | +| [azuread_service_principal.argocd](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/resources/service_principal) | resource | +| [azuread_service_principal.msgraph](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/resources/service_principal) | resource | +| [azurerm_application_insights.appinsights](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/application_insights) | resource | +| [azurerm_linux_function_app.func_app](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/linux_function_app) | resource | +| [azurerm_log_analytics_solution.aks](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_solution) | resource | +| [azurerm_log_analytics_workspace.aks](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_workspace) | resource | +| [azurerm_resource_group.aks](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/resource_group) | resource | +| 
[azurerm_role_assignment.aks_dns_mi_to_rg](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.aks_dns_mi_to_zone](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.aks_mi_aks_node_rg_mi_operator](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.aks_mi_aks_node_rg_vm_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.aks_mi_kv_certs](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.aks_mi_kv_keys](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.aks_mi_kv_secrets](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.func_app_aks](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.func_app_storage](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.velero_mi_aks_node_rg_vm_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.velero_mi_velero_storage_rg](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_service_plan.func_app](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/service_plan) | resource | +| 
[azurerm_storage_account.func_app](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/storage_account) | resource | +| [azurerm_storage_account.velero](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/storage_account) | resource | +| [azurerm_storage_blob.func_app](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/storage_blob) | resource | +| [azurerm_storage_container.func_app](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/storage_container) | resource | +| [azurerm_storage_container.velero](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/storage_container) | resource | +| [azurerm_user_assigned_identity.external_dns](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/user_assigned_identity) | resource | +| [azurerm_user_assigned_identity.velero](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/user_assigned_identity) | resource | +| [helm_release.aad_pod_identity](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.akv2k8s](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.argocd](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.cert_manager](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.external_dns](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.kured](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.nexus](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| 
[helm_release.nginx](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.velero](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [kubernetes_namespace.aad_pod_identity](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_namespace.akv2k8s](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_namespace.argocd](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_namespace.external_dns](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_namespace.gitlab](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_namespace.ingress](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_namespace.kured](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_namespace.nexus](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_namespace.velero](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_secret.velero_credentials](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/secret) | resource | +| [local_sensitive_file.kubeconfig](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/sensitive_file) | resource | +| [null_resource.argocd_apps](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| 
[null_resource.argocd_cert_sync](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.argocd_cm](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.argocd_configure](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.argocd_rbac_cm](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.argocd_secret](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.azureIdentity_external_dns](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.gitlab_cert_sync](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.nexus_cert_sync](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [tls_private_key.ssh](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | +| [archive_file.func_app](https://registry.terraform.io/providers/hashicorp/archive/2.2.0/docs/data-sources/file) | data source | +| [azuread_application_published_app_ids.well_known](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/data-sources/application_published_app_ids) | data source | +| [azuread_client_config.current](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/data-sources/client_config) | data source | +| [azuread_group.argocd_admins](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/data-sources/group) | data source | +| [azurerm_client_config.current](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/client_config) | data source | +| 
[azurerm_dns_zone.dns](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/dns_zone) | data source | +| [azurerm_key_vault.kv](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/key_vault) | data source | +| [azurerm_resource_group.aks_node_rg](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | +| [azurerm_resource_group.dns](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | +| [azurerm_subscription.current](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/subscription) | data source | +| [template_file.azureIdentities](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aad\_pod\_identity\_chart\_version](#input\_aad\_pod\_identity\_chart\_version) | https://github.com/Azure/aad-pod-identity/blob/master/charts/aad-pod-identity/Chart.yaml#L4 helm search repo aad-pod-identity/aad-pod-identity | `string` | `"4.1.10"` | no | +| [admin\_username](#input\_admin\_username) | The admin username of the VMs that will be deployed | `string` | `"sysadmin"` | no | +| [agent\_pool\_enable\_auto\_scaling](#input\_agent\_pool\_enable\_auto\_scaling) | n/a | `bool` | `false` | no | +| [agent\_pool\_node\_count](#input\_agent\_pool\_node\_count) | Agent Pool | `number` | `1` | no | +| [agent\_pool\_node\_max\_count](#input\_agent\_pool\_node\_max\_count) | n/a | `any` | `null` | no | +| [agent\_pool\_node\_min\_count](#input\_agent\_pool\_node\_min\_count) | n/a | `any` | `null` | no | +| [agent\_pool\_profile\_disk\_size\_gb](#input\_agent\_pool\_profile\_disk\_size\_gb) | n/a | `number` | `30` | no | +| [agent\_pool\_profile\_name](#input\_agent\_pool\_profile\_name) | n/a | `string` 
| `"default"` | no | +| [agent\_pool\_profile\_os\_type](#input\_agent\_pool\_profile\_os\_type) | n/a | `string` | `"Linux"` | no | +| [agent\_pool\_profile\_vm\_size](#input\_agent\_pool\_profile\_vm\_size) | n/a | `string` | `"Standard_D4s_v3"` | no | +| [aks\_admins\_aad\_group\_name](#input\_aks\_admins\_aad\_group\_name) | Name an existing Azure AD group for AKS admins | `string` | `"AKS-Admins"` | no | +| [aks\_config\_path](#input\_aks\_config\_path) | n/a | `string` | `"./azurek8s_config"` | no | +| [aks\_container\_insights\_enabled](#input\_aks\_container\_insights\_enabled) | Should Container Insights monitoring be enabled | `bool` | `true` | no | +| [akv2k8s\_chart\_version](#input\_akv2k8s\_chart\_version) | https://github.com/SparebankenVest/azure-key-vault-to-kubernetes https://github.com/SparebankenVest/helm-charts/tree/gh-pages/akv2k8s https://github.com/SparebankenVest/public-helm-charts/blob/master/stable/akv2k8s/Chart.yaml#L5 helm search repo spv-charts/akv2k8s | `string` | `"2.2.2"` | no | +| [argocd\_admin\_password](#input\_argocd\_admin\_password) | n/a | `string` | `"__ARGOCD_ADMIN_PASSWORD__"` | no | +| [argocd\_admins\_aad\_group\_name](#input\_argocd\_admins\_aad\_group\_name) | argo cd | `string` | `"ArgoCD_Admins"` | no | +| [argocd\_app\_reg\_name](#input\_argocd\_app\_reg\_name) | n/a | `string` | `"sp_argocd_oidc"` | no | +| [argocd\_apps\_path](#input\_argocd\_apps\_path) | n/a | `string` | `"files/argocd-apps.yaml"` | no | +| [argocd\_cert\_sync\_yaml\_path](#input\_argocd\_cert\_sync\_yaml\_path) | n/a | `string` | `"files/argocd-akvs-certificate-sync.yaml"` | no | +| [argocd\_chart\_version](#input\_argocd\_chart\_version) | argo cd https://github.com/argoproj/argo-helm/blob/master/charts/argo-cd/Chart.yaml#L5 helm search repo argo/argo-cd helm search repo -l argo/argo-cd \| head -n 20 * also update terraform/helm/argocd\_default\_values.yaml | `string` | `"5.6.0"` | no | +| 
[argocd\_cm\_yaml\_path](#input\_argocd\_cm\_yaml\_path) | n/a | `string` | `"files/argocd-cm-patch.tmpl.yaml"` | no | +| [argocd\_fqdn](#input\_argocd\_fqdn) | n/a | `string` | `"__ARGOCD_FQDN__"` | no | +| [argocd\_image\_tag](#input\_argocd\_image\_tag) | https://hub.docker.com/r/argoproj/argocd/tags * also update cli version: terraform/files/scripts/argocd\_config.sh#L22 | `string` | `"v2.4.15"` | no | +| [argocd\_rbac\_cm\_yaml\_path](#input\_argocd\_rbac\_cm\_yaml\_path) | n/a | `string` | `"files/argocd-rbac-cm-patch.tmpl.yaml"` | no | +| [argocd\_secret\_yaml\_path](#input\_argocd\_secret\_yaml\_path) | n/a | `string` | `"files/argocd-secret-patch.tmpl.yaml"` | no | +| [azure\_resourcegroup\_name](#input\_azure\_resourcegroup\_name) | n/a | `string` | `"__AKS_RG_NAME__"` | no | +| [azureidentity\_external\_dns\_yaml\_path](#input\_azureidentity\_external\_dns\_yaml\_path) | n/a | `string` | `"files/azureIdentity-external-dns.yaml.tpl"` | no | +| [azurerm\_kubernetes\_cluster\_name](#input\_azurerm\_kubernetes\_cluster\_name) | AKS | `string` | `"__AKS_CLUSTER_NAME__"` | no | +| [cert\_manager\_chart\_version](#input\_cert\_manager\_chart\_version) | https://hub.helm.sh/charts/jetstack/cert-manager helm search repo jetstack/cert-manager | `string` | `"v1.10.0"` | no | +| [dns\_resource\_group\_name](#input\_dns\_resource\_group\_name) | DNS | `string` | `"__DNS_RG_NAME__"` | no | +| [dns\_zone\_name](#input\_dns\_zone\_name) | n/a | `string` | `"__ROOT_DOMAIN_NAME__"` | no | +| [external\_dns\_chart\_version](#input\_external\_dns\_chart\_version) | https://bitnami.com/stack/external-dns/helm https://github.com/bitnami/charts/blob/master/bitnami/external-dns/Chart.yaml helm search repo bitnami/external-dns helm search repo -l bitnami/external-dns | `string` | `"6.10.2"` | no | +| [func\_app\_sas\_expires\_in\_hours](#input\_func\_app\_sas\_expires\_in\_hours) | Function Apps | `string` | `"2190h"` | no | +| 
[gitlab\_cert\_sync\_yaml\_path](#input\_gitlab\_cert\_sync\_yaml\_path) | gitlab | `string` | `"files/gitlab-akvs-certificate-sync.yaml"` | no | +| [helm\_chart\_repo\_deploy\_private\_key](#input\_helm\_chart\_repo\_deploy\_private\_key) | n/a | `string` | `"__HELM_CHART_REPO_DEPLOY_PRIVATE_KEY__\n"` | no | +| [ifttt\_webhook\_key](#input\_ifttt\_webhook\_key) | n/a | `string` | `"__IFTTT_WEBHOOK_KEY__"` | no | +| [key\_vault\_name](#input\_key\_vault\_name) | n/a | `string` | `"__KEY_VAULT_NAME__"` | no | +| [key\_vault\_resource\_group\_name](#input\_key\_vault\_resource\_group\_name) | n/a | `string` | `"__KEY_VAULT_RESOURCE_GROUP_NAME__"` | no | +| [kubernetes\_version](#input\_kubernetes\_version) | region Versions version used for both main AKS API service, and default node pool https://github.com/Azure/AKS/releases az aks get-versions --location eastus --output table pwsh -Command "(az aks get-versions --location uksouth \| convertfrom-json).orchestrators \| where default" | `string` | `"1.23.12"` | no | +| [kured\_chart\_version](#input\_kured\_chart\_version) | https://github.com/kubereboot/charts/tree/main/charts/kured helm search repo kubereboot/kured | `string` | `"4.0.2"` | no | +| [kured\_image\_tag](#input\_kured\_image\_tag) | https://github.com/kubereboot/kured#kubernetes--os-compatibility | `string` | `"1.10.2"` | no | +| [location](#input\_location) | n/a | `string` | `"__LOCATION__"` | no | +| [log\_analytics\_workspace\_name](#input\_log\_analytics\_workspace\_name) | n/a | `string` | `"__PREFIX__-la-workspace-001"` | no | +| [nexus\_base\_domain](#input\_nexus\_base\_domain) | Nexus | `string` | `"__ROOT_DOMAIN_NAME__"` | no | +| [nexus\_cert\_email](#input\_nexus\_cert\_email) | n/a | `string` | `"__EMAIL_ADDRESS__"` | no | +| [nexus\_cert\_sync\_yaml\_path](#input\_nexus\_cert\_sync\_yaml\_path) | akv2k8s | `string` | `"files/nexus-akvs-certificate-sync.yaml"` | no | +| [nexus\_chart\_version](#input\_nexus\_chart\_version) | 
https://github.com/adamrushuk/charts/releases helm search repo adamrushuk/sonatype-nexus | `string` | `"0.3.1"` | no | +| [nexus\_image\_tag](#input\_nexus\_image\_tag) | https://hub.docker.com/r/sonatype/nexus3/tags | `string` | `"3.42.0"` | no | +| [nexus\_ingress\_enabled](#input\_nexus\_ingress\_enabled) | n/a | `string` | `"__ENABLE_TLS_INGRESS__"` | no | +| [nexus\_letsencrypt\_environment](#input\_nexus\_letsencrypt\_environment) | n/a | `string` | `"__CERT_API_ENVIRONMENT__"` | no | +| [nexus\_tls\_secret\_name](#input\_nexus\_tls\_secret\_name) | n/a | `string` | `"__K8S_TLS_SECRET_NAME__"` | no | +| [nginx\_chart\_version](#input\_nginx\_chart\_version) | Helm charts https://github.com/kubernetes/ingress-nginx/releases helm repo update helm search repo ingress-nginx/ingress-nginx helm search repo -l ingress-nginx/ingress-nginx \| head -5 | `string` | `"4.3.0"` | no | +| [prefix](#input\_prefix) | Common | `string` | `"__PREFIX__"` | no | +| [sla\_sku](#input\_sla\_sku) | Define the SLA under which the managed master control plane of AKS is running | `string` | `"Free"` | no | +| [ssh\_public\_key](#input\_ssh\_public\_key) | Public key for SSH access to the VMs | `string` | `""` | no | +| [tags](#input\_tags) | A map of the tags to use on the resources | `map` |
{
"Env": "Dev",
"Owner": "Adam Rush",
"Source": "terraform"
}
| no | +| [velero\_backup\_included\_namespaces](#input\_velero\_backup\_included\_namespaces) | n/a | `list(string)` |
[
"nexus"
]
| no | +| [velero\_backup\_retention](#input\_velero\_backup\_retention) | n/a | `string` | `"2h0m0s"` | no | +| [velero\_backup\_schedule](#input\_velero\_backup\_schedule) | Velero backup schedule in cron format | `string` | `"0 */1 * * *"` | no | +| [velero\_chart\_version](#input\_velero\_chart\_version) | https://github.com/vmware-tanzu/helm-charts/releases helm search repo vmware-tanzu/velero * also update terraform/helm/velero\_default\_values.yaml * also update terraform/helm/velero\_values.yaml | `string` | `"2.32.1"` | no | +| [velero\_enabled](#input\_velero\_enabled) | Should Velero be enabled | `string` | `"__VELERO_ENABLED__"` | no | +| [velero\_image\_tag](#input\_velero\_image\_tag) | https://hub.docker.com/r/velero/velero/tags | `string` | `"v1.9.2"` | no | +| [velero\_service\_principle\_name](#input\_velero\_service\_principle\_name) | n/a | `string` | `"sp_velero"` | no | +| [velero\_storage\_account\_name](#input\_velero\_storage\_account\_name) | n/a | `string` | `"__VELERO_STORAGE_ACCOUNT__"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [aks\_credentials\_command](#output\_aks\_credentials\_command) | n/a | +| [aks\_node\_resource\_group](#output\_aks\_node\_resource\_group) | n/a | diff --git a/terraform/aad_pod_identity_helm.tf b/terraform/aad_pod_identity_helm.tf index fd842115..672b06e1 100644 --- a/terraform/aad_pod_identity_helm.tf +++ b/terraform/aad_pod_identity_helm.tf @@ -3,14 +3,14 @@ # role assignment for aad-pod-identity # https://azure.github.io/aad-pod-identity/docs/getting-started/role-assignment/#performing-role-assignments resource "azurerm_role_assignment" "aks_mi_aks_node_rg_vm_contributor" { - principal_id = module.aks.kubelet_identity[0].object_id + principal_id = azurerm_kubernetes_cluster.aks.kubelet_identity[0].object_id role_definition_name = "Virtual Machine Contributor" scope = data.azurerm_resource_group.aks_node_rg.id skip_service_principal_aad_check = true } resource 
"azurerm_role_assignment" "aks_mi_aks_node_rg_mi_operator" { - principal_id = module.aks.kubelet_identity[0].object_id + principal_id = azurerm_kubernetes_cluster.aks.kubelet_identity[0].object_id role_definition_name = "Managed Identity Operator" scope = data.azurerm_resource_group.aks_node_rg.id skip_service_principal_aad_check = true @@ -33,7 +33,7 @@ resource "kubernetes_namespace" "aad_pod_identity" { delete = "15m" } - depends_on = [module.aks] + depends_on = [azurerm_kubernetes_cluster.aks] } # https://www.terraform.io/docs/providers/helm/r/release.html diff --git a/terraform/aks.tf b/terraform/aks.tf index 42b9d78c..964d0a80 100644 --- a/terraform/aks.tf +++ b/terraform/aks.tf @@ -1,3 +1,6 @@ +# AKS +# https://registry.terraform.io/modules/adamrushuk/aks/azurerm/latest + # Common resource "tls_private_key" "ssh" { algorithm = "RSA" @@ -8,12 +11,6 @@ resource "azurerm_resource_group" "aks" { name = var.azure_resourcegroup_name location = var.location tags = var.tags - - lifecycle { - ignore_changes = [ - tags - ] - } } # Log Analytics @@ -24,15 +21,9 @@ resource "azurerm_log_analytics_workspace" "aks" { name = var.log_analytics_workspace_name location = azurerm_resource_group.aks.location resource_group_name = azurerm_resource_group.aks.name - sku = "Free" - retention_in_days = 7 + sku = "PerGB2018" + retention_in_days = 30 tags = var.tags - - lifecycle { - ignore_changes = [ - tags - ] - } } resource "azurerm_log_analytics_solution" "aks" { @@ -50,37 +41,142 @@ resource "azurerm_log_analytics_solution" "aks" { } } -# AKS -# https://registry.terraform.io/modules/adamrushuk/aks/azurerm/latest -module "aks" { - source = "adamrushuk/aks/azurerm" - version = "~> 0.11.0" - - kubernetes_version = var.kubernetes_version - location = azurerm_resource_group.aks.location - resource_group_name = azurerm_resource_group.aks.name - name = var.azurerm_kubernetes_cluster_name - sla_sku = var.sla_sku - aad_auth_enabled = true - azure_policy_enabled = false - tags = var.tags 
+# NOTE: Requires "Azure Active Directory Graph" "Directory.ReadWrite.All" Application API permission to create, and +# also requires "User Access Administrator" role to delete +# ! You can assign one of the required Azure Active Directory Roles with the AzureAD PowerShell Module +# https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/resources/group +resource "azuread_group" "aks_admins" { + display_name = "${var.azurerm_kubernetes_cluster_name}-aks-administrators" + description = "${var.azurerm_kubernetes_cluster_name} Kubernetes cluster administrators" + prevent_duplicate_names = true + security_enabled = true +} - # override defaults - default_node_pool = { - name = var.agent_pool_profile_name - count = var.agent_pool_node_count +# https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster +resource "azurerm_kubernetes_cluster" "aks" { + name = var.azurerm_kubernetes_cluster_name + location = azurerm_resource_group.aks.location + resource_group_name = azurerm_resource_group.aks.name + dns_prefix = var.azurerm_kubernetes_cluster_name + kubernetes_version = var.kubernetes_version + sku_tier = "Free" + role_based_access_control_enabled = true + tags = var.tags + + default_node_pool { + name = "default" orchestrator_version = var.kubernetes_version vm_size = var.agent_pool_profile_vm_size - enable_auto_scaling = var.agent_pool_enable_auto_scaling - max_count = var.agent_pool_node_max_count + node_count = 1 max_pods = 90 - min_count = var.agent_pool_node_min_count - os_disk_size_gb = var.agent_pool_profile_disk_size_gb } - # add-ons - log_analytics_workspace_id = var.aks_container_insights_enabled == true ? 
azurerm_log_analytics_workspace.aks[0].id : "" + linux_profile { + admin_username = var.admin_username + + ssh_key { + key_data = chomp( + coalesce( + var.ssh_public_key, + tls_private_key.ssh.public_key_openssh, + ) + ) + } + } + + # managed identity block + # https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#identity + identity { + type = "SystemAssigned" + } - # Add existing group to the new AKS cluster admin group - aks_admin_group_member_name = var.aks_admins_aad_group_name + # https://docs.microsoft.com/en-us/azure/aks/azure-ad-rbac + azure_active_directory_role_based_access_control { + managed = true + admin_group_object_ids = [ + azuread_group.aks_admins.id + ] + } + + # https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/policy-for-kubernetes + azure_policy_enabled = false + + # https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#oms_agent + # conditional dynamic block + dynamic "oms_agent" { + for_each = var.aks_container_insights_enabled == true ? 
[1] : [] + content { + log_analytics_workspace_id = azurerm_log_analytics_workspace.aks[0].id + } + } + + # https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#network_plugin + network_profile { + load_balancer_sku = "basic" + outbound_type = "loadBalancer" + network_plugin = "azure" + network_policy = "azure" + service_cidr = "10.0.0.0/16" + dns_service_ip = "10.0.0.10" + docker_bridge_cidr = "172.17.0.1/16" + } + + # https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#workload_identity_enabled + # https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster#register-the-enableworkloadidentitypreview-feature-flag + oidc_issuer_enabled = true + workload_identity_enabled = true } + +# Add role to access AKS Resource View +# https://docs.microsoft.com/en-us/azure/aks/kubernetes-portal +resource "azurerm_role_assignment" "aks_portal_resource_view" { + principal_id = azuread_group.aks_admins.id + role_definition_name = "Azure Kubernetes Service RBAC Cluster Admin" + scope = azurerm_kubernetes_cluster.aks.id +} + +# Add existing AAD group as a member to the -aks-administrators group +data "azuread_group" "existing_aks_admins" { + display_name = var.aks_admins_aad_group_name + security_enabled = true +} + +resource "azuread_group_member" "existing_aks_admins" { + group_object_id = azuread_group.aks_admins.id + member_object_id = data.azuread_group.existing_aks_admins.id +} + +# AKS module +# https://registry.terraform.io/modules/adamrushuk/aks/azurerm/latest +# module "aks" { +# source = "adamrushuk/aks/azurerm" +# version = "~> 1.1.0" + +# kubernetes_version = var.kubernetes_version +# location = azurerm_resource_group.aks.location +# resource_group_name = azurerm_resource_group.aks.name +# name = var.azurerm_kubernetes_cluster_name +# sla_sku = var.sla_sku +# aad_auth_enabled = true +# azure_policy_enabled = true +# tags = var.tags + +# # override defaults +# 
default_node_pool = { +# name = var.agent_pool_profile_name +# count = var.agent_pool_node_count +# orchestrator_version = var.kubernetes_version +# vm_size = var.agent_pool_profile_vm_size +# enable_auto_scaling = var.agent_pool_enable_auto_scaling +# max_count = var.agent_pool_node_max_count +# max_pods = 90 +# min_count = var.agent_pool_node_min_count +# os_disk_size_gb = var.agent_pool_profile_disk_size_gb +# } + +# # add-ons +# log_analytics_workspace_id = var.aks_container_insights_enabled == true ? azurerm_log_analytics_workspace.aks[0].id : "" + +# # Add existing group to the new AKS cluster admin group +# aks_admin_group_member_name = var.aks_admins_aad_group_name +# } diff --git a/terraform/akv2k8s_helm.tf b/terraform/akv2k8s_helm.tf index 1ff38c6e..312dc22f 100644 --- a/terraform/akv2k8s_helm.tf +++ b/terraform/akv2k8s_helm.tf @@ -16,7 +16,7 @@ data "azurerm_key_vault" "kv" { # key_vault_id = data.azurerm_key_vault.kv.id # tenant_id = data.azurerm_subscription.current.tenant_id -# object_id = module.aks.kubelet_identity[0].object_id +# object_id = azurerm_kubernetes_cluster.aks.kubelet_identity[0].object_id # certificate_permissions = [ # "get" @@ -36,31 +36,32 @@ data "azurerm_key_vault" "kv" { resource "azurerm_role_assignment" "aks_mi_kv_certs" { scope = data.azurerm_key_vault.kv.id role_definition_name = "Key Vault Certificates Officer" - principal_id = module.aks.kubelet_identity[0].object_id + principal_id = azurerm_kubernetes_cluster.aks.kubelet_identity[0].object_id description = "Perform any action on the keys of a key vault, except manage permissions" } resource "azurerm_role_assignment" "aks_mi_kv_keys" { scope = data.azurerm_key_vault.kv.id role_definition_name = "Key Vault Crypto User" - principal_id = module.aks.kubelet_identity[0].object_id + principal_id = azurerm_kubernetes_cluster.aks.kubelet_identity[0].object_id description = "Perform cryptographic operations using keys" } resource "azurerm_role_assignment" "aks_mi_kv_secrets" { scope 
= data.azurerm_key_vault.kv.id role_definition_name = "Key Vault Secrets User" - principal_id = module.aks.kubelet_identity[0].object_id + principal_id = azurerm_kubernetes_cluster.aks.kubelet_identity[0].object_id description = "Provides read-only access to secret contents" } # Requires "kube_admin_config_raw" as has AAD Auth enabled # https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#kube_admin_config_raw -resource "local_file" "kubeconfig" { - sensitive_content = module.aks.full_object.kube_admin_config_raw - filename = var.aks_config_path +# https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/sensitive_file +resource "local_sensitive_file" "kubeconfig" { + content = azurerm_kubernetes_cluster.aks.kube_admin_config_raw + filename = var.aks_config_path - depends_on = [module.aks] + depends_on = [azurerm_kubernetes_cluster.aks] } # https://www.terraform.io/docs/providers/kubernetes/r/namespace.html @@ -72,7 +73,7 @@ resource "kubernetes_namespace" "akv2k8s" { delete = "15m" } - depends_on = [module.aks] + depends_on = [azurerm_kubernetes_cluster.aks] } # https://www.terraform.io/docs/providers/helm/r/release.html diff --git a/terraform/argocd_helm.tf b/terraform/argocd_helm.tf index d72f4219..1d2b70ff 100644 --- a/terraform/argocd_helm.tf +++ b/terraform/argocd_helm.tf @@ -11,7 +11,7 @@ resource "kubernetes_namespace" "argocd" { delete = "15m" } - depends_on = [module.aks] + depends_on = [azurerm_kubernetes_cluster.aks] } # https://www.terraform.io/docs/provisioners/local-exec.html @@ -32,7 +32,7 @@ resource "null_resource" "argocd_cert_sync" { } depends_on = [ - local_file.kubeconfig, + local_sensitive_file.kubeconfig, helm_release.akv2k8s, kubernetes_namespace.argocd ] @@ -100,7 +100,7 @@ resource "null_resource" "argocd_configure" { } depends_on = [ - local_file.kubeconfig, + local_sensitive_file.kubeconfig, helm_release.argocd ] } @@ -123,7 +123,7 @@ resource "null_resource" 
"argocd_apps" { } depends_on = [ - local_file.kubeconfig, + local_sensitive_file.kubeconfig, null_resource.argocd_configure ] } diff --git a/terraform/argocd_sso.tf b/terraform/argocd_sso.tf index 17953161..f8677672 100644 --- a/terraform/argocd_sso.tf +++ b/terraform/argocd_sso.tf @@ -112,7 +112,7 @@ resource "null_resource" "argocd_cm" { } depends_on = [ - local_file.kubeconfig, + local_sensitive_file.kubeconfig, null_resource.argocd_configure ] } @@ -145,7 +145,7 @@ resource "null_resource" "argocd_secret" { } depends_on = [ - local_file.kubeconfig, + local_sensitive_file.kubeconfig, null_resource.argocd_configure ] } @@ -181,7 +181,7 @@ resource "null_resource" "argocd_rbac_cm" { } depends_on = [ - local_file.kubeconfig, + local_sensitive_file.kubeconfig, null_resource.argocd_configure ] } diff --git a/terraform/data.tf b/terraform/data.tf index d251f4d8..701481cb 100644 --- a/terraform/data.tf +++ b/terraform/data.tf @@ -2,5 +2,5 @@ data "azurerm_subscription" "current" {} data "azurerm_resource_group" "aks_node_rg" { - name = module.aks.node_resource_group + name = azurerm_kubernetes_cluster.aks.node_resource_group } diff --git a/terraform/dns.tf b/terraform/dns.tf index 83978950..e4129ae6 100644 --- a/terraform/dns.tf +++ b/terraform/dns.tf @@ -10,7 +10,7 @@ data "azurerm_dns_zone" "dns" { # external-dns managed identity resource "azurerm_user_assigned_identity" "external_dns" { - resource_group_name = module.aks.node_resource_group + resource_group_name = azurerm_kubernetes_cluster.aks.node_resource_group location = var.location name = "mi-external-dns" } @@ -40,7 +40,7 @@ resource "kubernetes_namespace" "external_dns" { delete = "15m" } - depends_on = [module.aks] + depends_on = [azurerm_kubernetes_cluster.aks] } # https://www.terraform.io/docs/provisioners/local-exec.html @@ -67,7 +67,7 @@ resource "null_resource" "azureIdentity_external_dns" { } depends_on = [ - local_file.kubeconfig, + local_sensitive_file.kubeconfig, kubernetes_namespace.external_dns, 
helm_release.aad_pod_identity ] diff --git a/terraform/examples/function-app/.vscode/extensions.json b/terraform/examples/function-app/.vscode/extensions.json new file mode 100644 index 00000000..f9151195 --- /dev/null +++ b/terraform/examples/function-app/.vscode/extensions.json @@ -0,0 +1,6 @@ +{ + "recommendations": [ + "ms-azuretools.vscode-azurefunctions", + "ms-vscode.PowerShell" + ] +} diff --git a/terraform/examples/function-app/.vscode/launch.json b/terraform/examples/function-app/.vscode/launch.json new file mode 100644 index 00000000..4027c7e3 --- /dev/null +++ b/terraform/examples/function-app/.vscode/launch.json @@ -0,0 +1,13 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Attach to PowerShell Functions", + "type": "PowerShell", + "request": "attach", + "customPipeName": "AzureFunctionsPSWorker", + "runspaceId": 1, + "preLaunchTask": "func: host start" + } + ] +} diff --git a/terraform/examples/function-app/.vscode/settings.json b/terraform/examples/function-app/.vscode/settings.json new file mode 100644 index 00000000..5f365164 --- /dev/null +++ b/terraform/examples/function-app/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "azureFunctions.deploySubpath": "functions", + "azureFunctions.projectLanguage": "PowerShell", + "azureFunctions.projectRuntime": "~4", + "debug.internalConsoleOptions": "neverOpen" +} diff --git a/terraform/examples/function-app/.vscode/tasks.json b/terraform/examples/function-app/.vscode/tasks.json new file mode 100644 index 00000000..1b46dd83 --- /dev/null +++ b/terraform/examples/function-app/.vscode/tasks.json @@ -0,0 +1,14 @@ +{ + "version": "2.0.0", + "tasks": [ + { + "type": "func", + "command": "host start", + "problemMatcher": "$func-powershell-watch", + "isBackground": true, + "options": { + "cwd": "${workspaceFolder}/functions" + } + } + ] +} diff --git a/terraform/examples/function-app/README.md b/terraform/examples/function-app/README.md new file mode 100644 index 00000000..de04b519 --- /dev/null +++ 
b/terraform/examples/function-app/README.md @@ -0,0 +1,29 @@ +# powershell function app example + +**IMPORTANT**: It can take a while for the `HttpTrigger1` function to show within the `Function App > Function` screen. + +## usage + +```bash +# login via service principle +azh + +# init +cd terraform/examples/function-app +terraform init + +# show plan and apply +terraform apply + +# show outputs +terraform output function +terraform output function_url + +# test function +eval curl $(terraform output --raw function_url)?Name=Adam +eval curl $(terraform output --raw function_url)?Name=Tasha + + +# CLEANUP +terraform destroy +``` diff --git a/terraform/examples/function-app/functions/.funcignore b/terraform/examples/function-app/functions/.funcignore new file mode 100644 index 00000000..8817d3fd --- /dev/null +++ b/terraform/examples/function-app/functions/.funcignore @@ -0,0 +1,4 @@ +.git* +.vscode +local.settings.json +test diff --git a/terraform/examples/function-app/functions/.gitignore b/terraform/examples/function-app/functions/.gitignore new file mode 100644 index 00000000..b5c0e849 --- /dev/null +++ b/terraform/examples/function-app/functions/.gitignore @@ -0,0 +1,11 @@ + +# Azure Functions artifacts +bin +obj +appsettings.json +local.settings.json + +# Azurite artifacts +__blobstorage__ +__queuestorage__ +__azurite_db*__.json diff --git a/terraform/examples/function-app/functions/HttpTrigger1/function.json b/terraform/examples/function-app/functions/HttpTrigger1/function.json new file mode 100644 index 00000000..a30d6d5a --- /dev/null +++ b/terraform/examples/function-app/functions/HttpTrigger1/function.json @@ -0,0 +1,19 @@ +{ + "bindings": [ + { + "authLevel": "anonymous", + "type": "httpTrigger", + "direction": "in", + "name": "Request", + "methods": [ + "get", + "post" + ] + }, + { + "type": "http", + "direction": "out", + "name": "Response" + } + ] +} diff --git a/terraform/examples/function-app/functions/HttpTrigger1/run.ps1 
b/terraform/examples/function-app/functions/HttpTrigger1/run.ps1 new file mode 100644 index 00000000..ceaa2ad5 --- /dev/null +++ b/terraform/examples/function-app/functions/HttpTrigger1/run.ps1 @@ -0,0 +1,25 @@ +using namespace System.Net + +# Input bindings are passed in via param block. +param($Request, $TriggerMetadata) + +# Write to the Azure Functions log stream. +Write-Host "PowerShell HTTP trigger function processed a request." + +# Interact with query parameters or the body of the request. +$name = $Request.Query.Name +if (-not $name) { + $name = $Request.Body.Name +} + +$body = "This HTTP triggered function executed successfully. Pass a name in the query string or in the request body for a personalized response." + +if ($name) { + $body = "Hello, $name. This HTTP triggered function executed successfully." +} + +# Associate values to output bindings by calling 'Push-OutputBinding'. +Push-OutputBinding -Name Response -Value ([HttpResponseContext]@{ + StatusCode = [HttpStatusCode]::OK + Body = $body +}) diff --git a/terraform/examples/function-app/functions/HttpTrigger1/sample.dat b/terraform/examples/function-app/functions/HttpTrigger1/sample.dat new file mode 100644 index 00000000..68f5a0bb --- /dev/null +++ b/terraform/examples/function-app/functions/HttpTrigger1/sample.dat @@ -0,0 +1,3 @@ +{ + "name": "Azure" +} diff --git a/terraform/examples/function-app/functions/host.json b/terraform/examples/function-app/functions/host.json new file mode 100644 index 00000000..6ae17b86 --- /dev/null +++ b/terraform/examples/function-app/functions/host.json @@ -0,0 +1,18 @@ +{ + "version": "2.0", + "logging": { + "applicationInsights": { + "samplingSettings": { + "isEnabled": true, + "excludedTypes": "Request" + } + } + }, + "extensionBundle": { + "id": "Microsoft.Azure.Functions.ExtensionBundle", + "version": "[2.*, 3.0.0)" + }, + "managedDependency": { + "enabled": true + } +} diff --git a/terraform/examples/function-app/functions/profile.ps1 
b/terraform/examples/function-app/functions/profile.ps1 new file mode 100644 index 00000000..1670fc99 --- /dev/null +++ b/terraform/examples/function-app/functions/profile.ps1 @@ -0,0 +1,22 @@ +# Azure Functions profile.ps1 +# +# This profile.ps1 will get executed every "cold start" of your Function App. +# "cold start" occurs when: +# +# * A Function App starts up for the very first time +# * A Function App starts up after being de-allocated due to inactivity +# +# You can define helper functions, run commands, or specify environment variables +# NOTE: any variables defined that are not environment variables will get reset after the first execution + +# Authenticate with Azure PowerShell using MSI. +# Remove this if you are not planning on using MSI or Azure PowerShell. +if ($env:MSI_SECRET) { + Disable-AzContextAutosave -Scope Process | Out-Null + Connect-AzAccount -Identity +} + +# Uncomment the next line to enable legacy AzureRm alias in Azure PowerShell. +# Enable-AzureRmAlias + +# You can also define functions or aliases that can be referenced in any of your PowerShell functions. diff --git a/terraform/examples/function-app/functions/requirements.psd1 b/terraform/examples/function-app/functions/requirements.psd1 new file mode 100644 index 00000000..dd10ff53 --- /dev/null +++ b/terraform/examples/function-app/functions/requirements.psd1 @@ -0,0 +1,8 @@ +# This file enables modules to be automatically managed by the Functions service. +# See https://aka.ms/functionsmanageddependency for additional information. +# +@{ + # For latest supported version, go to 'https://www.powershellgallery.com/packages/Az'. + # To use the Az module in your function app, please uncomment the line below. 
+ # 'Az' = '8.*' +} diff --git a/terraform/examples/function-app/main.tf b/terraform/examples/function-app/main.tf new file mode 100644 index 00000000..ba784114 --- /dev/null +++ b/terraform/examples/function-app/main.tf @@ -0,0 +1,125 @@ +# function app example + +# providers +provider "azurerm" { + features {} +} +terraform { + required_version = ">= 1.0" + required_providers { + # https://github.com/terraform-providers/terraform-provider-azurerm/releases + azurerm = { + source = "hashicorp/azurerm" + version = "~> 3.8.0" + } + archive = { + source = "hashicorp/archive" + version = "2.2.0" + } + } +} + +locals { + region = "uksouth" + resource_group_name = "az-func-example" + storage_account_name = "arlinuxfunctionappps" + storage_container_name = "function-releases" + app_service_plan_name = "example-app-service-plan" + function_app_name = "arush-linux-function-app" + function_source_path = "./functions" + function_name = "HttpTrigger1" +} + +resource "azurerm_resource_group" "example" { + name = local.resource_group_name + location = local.region +} + +resource "azurerm_storage_account" "example" { + name = local.storage_account_name + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + account_tier = "Standard" + account_replication_type = "LRS" + enable_https_traffic_only = true + min_tls_version = "TLS1_2" +} + +resource "azurerm_storage_container" "example" { + name = local.storage_container_name + storage_account_name = azurerm_storage_account.example.name + container_access_type = "private" +} + +data "archive_file" "example" { + type = "zip" + source_dir = local.function_source_path + output_path = "function_release.zip" +} + +resource "azurerm_storage_blob" "example" { + # The name of the file will be "filehash.zip" where file hash is the SHA256 hash of the file. 
+ name = "${filesha256(data.archive_file.example.output_path)}.zip" + source = data.archive_file.example.output_path + storage_account_name = azurerm_storage_account.example.name + storage_container_name = azurerm_storage_container.example.name + type = "Block" +} + +resource "azurerm_service_plan" "example" { + name = local.app_service_plan_name + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + os_type = "Linux" + sku_name = "Y1" +} + +resource "azurerm_linux_function_app" "example" { + name = local.function_app_name + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + enabled = true + storage_account_name = azurerm_storage_account.example.name + service_plan_id = azurerm_service_plan.example.id + storage_uses_managed_identity = true + + identity { + type = "SystemAssigned" + } + + site_config { + # vnet_route_all_enabled = true + # application_insights_key = azurerm_application_insights.application_insights.instrumentation_key + # http2_enabled = true + + application_stack { + powershell_core_version = 7.2 + } + } + + app_settings = { + "WEBSITE_RUN_FROM_PACKAGE" = azurerm_storage_blob.example.url + } + + lifecycle { + # required to ignore the auto-generated "hidden-link:" tags + ignore_changes = [ + tags + ] + } +} + +resource "azurerm_role_assignment" "example" { + principal_id = azurerm_linux_function_app.example.identity[0].principal_id + role_definition_name = "Storage Blob Data Contributor" + scope = azurerm_storage_account.example.id +} + +output "function" { + value = azurerm_linux_function_app.example + sensitive = true +} + +output "function_url" { + value = "https://${azurerm_linux_function_app.example.name}.azurewebsites.net/api/${local.function_name}" +} diff --git a/terraform/examples/module-dependency/README.md b/terraform/examples/module-dependency/README.md new file mode 100644 index 00000000..3fef396a --- /dev/null +++ 
b/terraform/examples/module-dependency/README.md @@ -0,0 +1,29 @@ +# module dependency example + +## usage + +```bash +# login via service principal +azh + +# init +cd terraform/examples/module-dependency +terraform init + +# show plan and apply +terraform apply + +# change role definition permissions, then apply changes +# this should show "~ update in-place" changes +terraform apply + +# test locals +terraform console +local.custom_contributor_default_not_actions +local.nsg_right_allowed_actions +local.nsg_not_actions + + +# CLEANUP +terraform destroy +``` diff --git a/terraform/examples/module-dependency/aad-group/aad-group.tf b/terraform/examples/module-dependency/aad-group/aad-group.tf new file mode 100644 index 00000000..92e4be54 --- /dev/null +++ b/terraform/examples/module-dependency/aad-group/aad-group.tf @@ -0,0 +1,18 @@ +# testing module dependency +data "azurerm_subscription" "current" {} + +resource "azuread_group" "elevated_group" { + display_name = "${data.azurerm_subscription.current.display_name}_Elevated_Ops" + security_enabled = true + prevent_duplicate_names = true +} + +output "aad_elevated_group" { + value = azuread_group.elevated_group.id + description = "Id of the AAD Elevated Ops group for the subscription" +} + +output "aad_group_name" { + value = azuread_group.elevated_group.display_name + description = "Display name of the AAD Elevated Ops group for the subscription" +} diff --git a/terraform/examples/module-dependency/main.tf b/terraform/examples/module-dependency/main.tf new file mode 100644 index 00000000..d14135cc --- /dev/null +++ b/terraform/examples/module-dependency/main.tf @@ -0,0 +1,33 @@ +# testing module dependency + +# providers +provider "azurerm" { + features {} +} +terraform { + required_version = ">= 0.13" + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~> 2.86.0" + } + } +} + +# module "aad_group" +module "aad_group" { + source = "./aad-group" +} + +# module "rg" +module "rg" { + source = 
"./rg" + # object_id = module.aad_group.aad_elevated_group + object_id = module.aad_group.aad_group_name +} + +# resource "azurerm_resource_group" "example" { +# count = var.object_id == "" ? 0 : 1 +# name = module.aad_group.aad_elevated_group +# location = "uksouth" +# } diff --git a/terraform/examples/module-dependency/rg/rg.tf b/terraform/examples/module-dependency/rg/rg.tf new file mode 100644 index 00000000..f8a2e8f5 --- /dev/null +++ b/terraform/examples/module-dependency/rg/rg.tf @@ -0,0 +1,9 @@ +variable "object_id" { + default = "" +} + +resource "azurerm_resource_group" "example" { + count = var.object_id == "" ? 0 : 1 + name = var.object_id + location = "uksouth" +} diff --git a/terraform/examples/role-assignment/README.md b/terraform/examples/role-assignment/README.md new file mode 100644 index 00000000..fa44e8e9 --- /dev/null +++ b/terraform/examples/role-assignment/README.md @@ -0,0 +1,29 @@ +# role assignment example + +## usage + +```bash +# login via service principle +azh + +# init +cd terraform/examples/role-assignment +terraform init + +# show plan and apply +terraform apply + +# change role definition permissions, then apply changes +# this should show "~ update in-place" changes +terraform apply + +# test locals +terraform console +local.custom_contributor_default_not_actions +local.nsg_right_allowed_actions +local.nsg_not_actions + + +# CLEANUP +terraform destroy +``` diff --git a/terraform/examples/role-assignment/main.tf b/terraform/examples/role-assignment/main.tf new file mode 100644 index 00000000..9d96e128 --- /dev/null +++ b/terraform/examples/role-assignment/main.tf @@ -0,0 +1,92 @@ +# test modifying a role def after a role assignment exists + +# providers +provider "azurerm" { + features {} +} +terraform { + required_version = ">= 0.13" + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~> 2.86.0" + } + } +} + +# vars +variable "nsg_rights_enabled" { + description = "additional rights for nsg usage" + 
default = false +} + +locals { + default_custom_not_actions = [ + "Microsoft.Authorization/*/Delete", + "Microsoft.Authorization/*/Write", + "Microsoft.Authorization/elevateAccess/Action", + "Microsoft.Blueprint/blueprintAssignments/delete", + "Microsoft.Blueprint/blueprintAssignments/write", + "Microsoft.Network/networkSecurityGroups/delete", + "Microsoft.Network/networkSecurityGroups/join/action", + "Microsoft.Network/networkSecurityGroups/securityRules/delete", + "Microsoft.Network/networkSecurityGroups/securityRules/write", + "Microsoft.Network/networkSecurityGroups/write", + "Microsoft.Network/publicIPAddresses/delete", + "Microsoft.Network/publicIPAddresses/join/action", + "Microsoft.Network/publicIPAddresses/write", + "Microsoft.Network/publicIPPrefixes/delete", + "Microsoft.Network/publicIPPrefixes/join/action", + "Microsoft.Network/publicIPPrefixes/write", + "Microsoft.Network/routeTables/*/delete", + "Microsoft.Network/routeTables/*/write", + "Microsoft.Network/virtualNetworks/*/delete", + "Microsoft.Network/virtualNetworks/*/write", + "Microsoft.Subscription/cancel/action", + "Microsoft.Subscription/rename/action", + ] + + nsg_rights_allowed_actions = [ + "Microsoft.Network/networkSecurityGroups/delete", + "Microsoft.Network/networkSecurityGroups/join/action", + "Microsoft.Network/networkSecurityGroups/securityRules/delete", + "Microsoft.Network/networkSecurityGroups/securityRules/write", + "Microsoft.Network/networkSecurityGroups/write", + ] + + nsg_custom_not_actions = tolist(setsubtract(local.default_custom_not_actions, local.nsg_rights_allowed_actions)) +} + +# data sources +data "azurerm_subscription" "current" {} +data "azurerm_client_config" "current" {} + + + +# resources +resource "azurerm_role_definition" "custom" { + name = "Test-Role" + scope = data.azurerm_subscription.current.id + + permissions { + actions = [ + "Microsoft.Blueprint/blueprintAssignments/write", + "Microsoft.Resources/subscriptions/resourceGroups/read", + 
"Microsoft.Blueprint/blueprintAssignments/delete", + "Microsoft.Resources/subscriptions/resourceGroups/write", + ] + + # not_actions = var.nsg_rights_enabled ? local.nsg_custom_not_actions : local.default_custom_not_actions + } + + assignable_scopes = [ + data.azurerm_subscription.current.id, + ] +} + +resource "azurerm_role_assignment" "custom" { + scope = data.azurerm_subscription.current.id + role_definition_id = azurerm_role_definition.custom.role_definition_resource_id + # principal_id = data.azurerm_client_config.current.client_id + principal_id = "577321c0-cff2-4d20-b29e-5e775942b32a" +} diff --git a/terraform/examples/setproduct-multiple-list-variations/README.md b/terraform/examples/setproduct-multiple-list-variations/README.md new file mode 100644 index 00000000..d2399b9f --- /dev/null +++ b/terraform/examples/setproduct-multiple-list-variations/README.md @@ -0,0 +1,28 @@ +# setproduct example + +## usage + +```bash +# init +cd terraform/examples/setproduct-multiple-list-variations +terraform init + +# show plan +terraform plan + +# enter console +terraform console + +# output locals to view data structures +# show all list variations +local.role_scopes_product + +# show the map of lists +local.role_scopes_map_of_lists + +# show the nested map +local.role_scopes_map_of_maps + +# show plan +terraform plan +``` diff --git a/terraform/examples/setproduct-multiple-list-variations/main.tf b/terraform/examples/setproduct-multiple-list-variations/main.tf new file mode 100644 index 00000000..5d1f44e2 --- /dev/null +++ b/terraform/examples/setproduct-multiple-list-variations/main.tf @@ -0,0 +1,133 @@ +# Create all possible combinations from two lists, and loop through result to assign roles +# https://www.terraform.io/docs/language/functions/setproduct.html + +provider "azurerm" { + features {} +} + +terraform { + required_version = ">= 1.0" + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~> 3.20.0" + } + } +} + +locals { + roles = 
[ + "Storage Blob Data Owner", + "Key Vault Contributor", + ] + scopes = [ + "/subscriptions/SUB_NAME/resourceGroups/rg1", + "/subscriptions/SUB_NAME/resourceGroups/rg2", + ] + + role_scopes_product = setproduct(local.roles, local.scopes) + + # Setproduct produces a structure like this for role_scopes_product: + # [ + # [ + # "Storage Blob Data Owner", + # "/subscriptions/SUB_NAME/resourceGroups/rg1", + # ], + # [ + # "Storage Blob Data Owner", + # "/subscriptions/SUB_NAME/resourceGroups/rg2", + # ], + # [ + # "Key Vault Contributor", + # "/subscriptions/SUB_NAME/resourceGroups/rg1", + # ], + # [ + # "Key Vault Contributor", + # "/subscriptions/SUB_NAME/resourceGroups/rg2", + # ], + # ] + + + # Build a map from the above "list of lists", using a compound key of both list values, and the map value being the original list of the role and scope + role_scopes_map_of_lists = { for role_scope in local.role_scopes_product : "${role_scope[0]}-${role_scope[1]}" => role_scope } + + # role_scopes_map_of_lists looks like this: + # { + # "Key Vault Contributor-/subscriptions/SUB_NAME/resourceGroups/rg1" = [ + # "Key Vault Contributor", + # "/subscriptions/SUB_NAME/resourceGroups/rg1", + # ] + # "Key Vault Contributor-/subscriptions/SUB_NAME/resourceGroups/rg2" = [ + # "Key Vault Contributor", + # "/subscriptions/SUB_NAME/resourceGroups/rg2", + # ] + # "Storage Blob Data Owner-/subscriptions/SUB_NAME/resourceGroups/rg1" = [ + # "Storage Blob Data Owner", + # "/subscriptions/SUB_NAME/resourceGroups/rg1", + # ] + # "Storage Blob Data Owner-/subscriptions/SUB_NAME/resourceGroups/rg2" = [ + # "Storage Blob Data Owner", + # "/subscriptions/SUB_NAME/resourceGroups/rg2", + # ] + # } + + + role_scopes_map_of_maps = { + for role_scope in local.role_scopes_product : "${role_scope[0]}-${role_scope[1]}" => { + "role_name" = role_scope[0], + "scope" = role_scope[1] + } + } + + # role_scopes_map_of_maps looks like this: + # { + # "Key Vault 
Contributor-/subscriptions/SUB_NAME/resourceGroups/rg1" = { + # "role_name" = "Key Vault Contributor" + # "scope" = "/subscriptions/SUB_NAME/resourceGroups/rg1" + # } + # "Key Vault Contributor-/subscriptions/SUB_NAME/resourceGroups/rg2" = { + # "role_name" = "Key Vault Contributor" + # "scope" = "/subscriptions/SUB_NAME/resourceGroups/rg2" + # } + # "Storage Blob Data Owner-/subscriptions/SUB_NAME/resourceGroups/rg1" = { + # "role_name" = "Storage Blob Data Owner" + # "scope" = "/subscriptions/SUB_NAME/resourceGroups/rg1" + # } + # "Storage Blob Data Owner-/subscriptions/SUB_NAME/resourceGroups/rg2" = { + # "role_name" = "Storage Blob Data Owner" + # "scope" = "/subscriptions/SUB_NAME/resourceGroups/rg2" + # } + # } +} + +# resource groups +resource "azurerm_resource_group" "rg1" { + name = "rg1" + location = "uksouth" +} + +resource "azurerm_resource_group" "rg2" { + name = "rg2" + location = "uksouth" +} + +data "azurerm_client_config" "current" {} +data "azuread_service_principal" "current" { + application_id = data.azurerm_client_config.current.client_id +} + +# maps of lists loop example +resource "azurerm_role_assignment" "map_of_lists" { + for_each = local.role_scopes_map_of_lists + scope = each.value[1] + role_definition_name = each.value[0] + principal_id = "MY_USER_ID" +} + +# maps of maps loop example +resource "azurerm_role_assignment" "map_of_maps" { + for_each = local.role_scopes_map_of_maps + scope = each.value.scope + role_definition_name = each.value.role_name + principal_id = data.azuread_service_principal.current.object_id +} diff --git a/terraform/examples/terrafy/README.md b/terraform/examples/terrafy/README.md new file mode 100644 index 00000000..bdf45cfe --- /dev/null +++ b/terraform/examples/terrafy/README.md @@ -0,0 +1,33 @@ +# Terrafy + +## Install + +Run my [Azure Terrafy install script](https://github.com/adamrushuk/tools-install/blob/master/aztfy.sh). 
+ +## Create Resources + +Before running Azure Terrafy, some resources will need to exist. + +For this test, I created a `PowerShell Core 7.2 Function App` within a Resource Group called `rg-functionapp`. + +## Usage + + +```bash +# init +cd terraform/examples/terrafy +mkdir -p ./output + +# login to your account +az login + +# run aztfy +# aztfy [option] +aztfy -o ./output rg-functionapp + +# review the resources +# select any entries that are marked with "skip", press "enter" then input the Terraform resource address in form +# of `resource_type.resource_name` (e.g. "azurerm_storage_account.func_app") + +# press "w" to import into local state +``` diff --git a/terraform/files/function_app.zip b/terraform/files/function_app.zip deleted file mode 100644 index c1281075..00000000 --- a/terraform/files/function_app.zip +++ /dev/null @@ -1 +0,0 @@ -# PLACEHOLDER so validate doesnt fail on missing file diff --git a/terraform/files/scripts/argocd_config.sh b/terraform/files/scripts/argocd_config.sh index fb2d72cf..4c0b1b14 100644 --- a/terraform/files/scripts/argocd_config.sh +++ b/terraform/files/scripts/argocd_config.sh @@ -19,7 +19,7 @@ ARGOCD_HEALTH_CHECK_URL="https://$ARGOCD_FQDN/healthz" # Install # https://github.com/argoproj/argo-cd/releases/ -VERSION="v2.1.6" +VERSION="v2.5.9" curl -sSL -o "$ARGOCD_PATH" "https://github.com/argoproj/argo-cd/releases/download/$VERSION/argocd-linux-amd64" chmod +x "$ARGOCD_PATH" diff --git a/terraform/function_app.tf b/terraform/function_app.tf index 4f2c484a..b331f242 100644 --- a/terraform/function_app.tf +++ b/terraform/function_app.tf @@ -1,18 +1,16 @@ # Function App for reporting on VMs left running outside allowed time range -resource "azurerm_resource_group" "func_app" { - name = "${var.prefix}-rg-function-app" - location = var.location - tags = var.tags -} +# https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/storage_account resource "azurerm_storage_account" "func_app" { - name = "${var.prefix}stfuncapp" - 
resource_group_name = azurerm_resource_group.func_app.name - location = azurerm_resource_group.func_app.location - account_tier = "Standard" - account_replication_type = "LRS" - allow_blob_public_access = false - tags = var.tags + name = "${var.prefix}stfuncapp" + resource_group_name = azurerm_resource_group.aks.name + location = azurerm_resource_group.aks.location + account_tier = "Standard" + account_replication_type = "LRS" + allow_nested_items_to_be_public = false + enable_https_traffic_only = true + min_tls_version = "TLS1_2" + tags = var.tags } resource "azurerm_storage_container" "func_app" { @@ -21,111 +19,95 @@ resource "azurerm_storage_container" "func_app" { container_access_type = "private" } +data "archive_file" "func_app" { + type = "zip" + source_dir = "${path.module}/../function_app" + output_path = "function_release.zip" +} + resource "azurerm_storage_blob" "func_app" { - name = "function_app.zip" + # name will be "[filehash].zip" (filehash is the SHA256 hash of the file) + name = "${filesha256(data.archive_file.func_app.output_path)}.zip" storage_account_name = azurerm_storage_account.func_app.name storage_container_name = azurerm_storage_container.func_app.name + source = data.archive_file.func_app.output_path type = "Block" - source = "${path.module}/files/function_app.zip" } -data "azurerm_storage_account_sas" "func_app" { - connection_string = azurerm_storage_account.func_app.primary_connection_string - https_only = true - # start = formatdate("YYYY-MM-DD", timestamp()) - # expiry = formatdate("YYYY-MM-DD", timeadd(timestamp(), var.func_app_sas_expires_in_hours)) - - # hardcoded values to stop timestamp() affecting EVERY Terraform Plan - start = "2020-10-25" - expiry = "2022-01-01" - - resource_types { - object = true - container = false - service = false - } - - services { - blob = true - queue = false - table = false - file = false - } - - permissions { - read = true - write = false - delete = false - list = false - add = false - create = 
false - update = false - process = false - } -} - -resource "azurerm_app_service_plan" "func_app" { +# https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/service_plan +resource "azurerm_service_plan" "func_app" { name = "${var.prefix}-funcapp" - location = azurerm_resource_group.func_app.location - resource_group_name = azurerm_resource_group.func_app.name - # reserved = false # This needs to be set as 'false' otherwise the default is a Linux function app which won't work with our code - kind = "FunctionApp" - tags = var.tags - - # Consumption Plan - sku { - tier = "Dynamic" - size = "Y1" - } - + location = azurerm_resource_group.aks.location + resource_group_name = azurerm_resource_group.aks.name + os_type = "Linux" + sku_name = "Y1" + tags = var.tags } # Application Insights used for logs and monitoring +# https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/application_insights resource "azurerm_application_insights" "appinsights" { name = "${var.prefix}-funcapp" location = var.location - resource_group_name = azurerm_resource_group.func_app.name + resource_group_name = azurerm_resource_group.aks.name application_type = "web" + workspace_id = azurerm_log_analytics_workspace.aks[0].id tags = var.tags } # Function App using zipped up source files -resource "azurerm_function_app" "func_app" { - name = "${var.prefix}-funcapp" - location = azurerm_resource_group.func_app.location - resource_group_name = azurerm_resource_group.func_app.name - app_service_plan_id = azurerm_app_service_plan.func_app.id - https_only = true - storage_account_access_key = azurerm_storage_account.func_app.primary_access_key - storage_account_name = azurerm_storage_account.func_app.name - version = "~3" - tags = var.tags - app_settings = { - "APPINSIGHTS_INSTRUMENTATIONKEY" = azurerm_application_insights.appinsights.instrumentation_key - "FUNCTION_APP_EDIT_MODE" = "readonly" - "FUNCTIONS_WORKER_RUNTIME_VERSION" = "~7" - 
"FUNCTIONS_WORKER_RUNTIME" = "powershell" - "HASH" = base64encode(filesha256("${path.module}/files/function_app.zip")) - "IFTTT_WEBHOOK_KEY" = var.ifttt_webhook_key - "WEBSITE_RUN_FROM_PACKAGE" = "https://${azurerm_storage_account.func_app.name}.blob.core.windows.net/${azurerm_storage_container.func_app.name}/${azurerm_storage_blob.func_app.name}${data.azurerm_storage_account_sas.func_app.sas}" - "WEEKDAY_ALLOWED_TIME_RANGE" = "06:30 -> 09:00" - } +# https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/linux_function_app +resource "azurerm_linux_function_app" "func_app" { + name = "${var.prefix}-funcapp" + location = azurerm_resource_group.aks.location + resource_group_name = azurerm_resource_group.aks.name + service_plan_id = azurerm_service_plan.func_app.id + storage_account_name = azurerm_storage_account.func_app.name + storage_uses_managed_identity = true + enabled = true + https_only = true + tags = var.tags identity { type = "SystemAssigned" } + site_config { + # https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/linux_function_app#application_insights_key + application_insights_key = azurerm_application_insights.appinsights.instrumentation_key + + application_stack { + powershell_core_version = 7.2 + } + } + + # https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings + app_settings = { + # The Function app will only use the code in the blob if the computed hash matches the hash you specify in the app settings. 
The computed hash takes the SHA256 hash of the file and then base64 encodes it + # "HASH" = base64encode(filesha256("${path.module}/files/function_app.zip")) + "WEBSITE_RUN_FROM_PACKAGE" = azurerm_storage_blob.func_app.url + "IFTTT_WEBHOOK_KEY" = var.ifttt_webhook_key + "WEEKDAY_ALLOWED_TIME_RANGE" = "06:30 -> 08:00" + } + lifecycle { + # required to ignore the auto-generated "hidden-link:" tags ignore_changes = [ - app_settings, + tags ] } } +# Give Function App access to function zip blob +resource "azurerm_role_assignment" "func_app_storage" { + principal_id = azurerm_linux_function_app.func_app.identity[0].principal_id + role_definition_name = "Storage Blob Data Contributor" + scope = azurerm_storage_account.func_app.id +} # Give Function App Reader role for the AKS cluster node resource group -resource "azurerm_role_assignment" "func_app" { +resource "azurerm_role_assignment" "func_app_aks" { scope = data.azurerm_resource_group.aks_node_rg.id role_definition_name = "Reader" - principal_id = azurerm_function_app.func_app.identity.0.principal_id + principal_id = azurerm_linux_function_app.func_app.identity[0].principal_id } diff --git a/terraform/gitlab_helm.tf b/terraform/gitlab_helm.tf index 8d056642..9dee70d5 100644 --- a/terraform/gitlab_helm.tf +++ b/terraform/gitlab_helm.tf @@ -10,7 +10,7 @@ resource "kubernetes_namespace" "gitlab" { delete = "15m" } - depends_on = [module.aks] + depends_on = [azurerm_kubernetes_cluster.aks] } # https://www.terraform.io/docs/provisioners/local-exec.html @@ -31,7 +31,7 @@ resource "null_resource" "gitlab_cert_sync" { } depends_on = [ - local_file.kubeconfig, + local_sensitive_file.kubeconfig, helm_release.akv2k8s, kubernetes_namespace.gitlab ] diff --git a/terraform/helm/aad_pod_identity_default_values.yaml b/terraform/helm/aad_pod_identity_default_values.yaml index fce412f9..301b69bb 100644 --- a/terraform/helm/aad_pod_identity_default_values.yaml +++ b/terraform/helm/aad_pod_identity_default_values.yaml @@ -1,4 +1,4 @@ -# 
source: https://github.com/Azure/aad-pod-identity/blob/v1.8.3/charts/aad-pod-identity/values.yaml +# source: https://github.com/Azure/aad-pod-identity/blob/v1.8.13/charts/aad-pod-identity/values.yaml # Default values for aad-pod-identity-helm. # This is a YAML-formatted file. @@ -45,7 +45,7 @@ operationMode: "standard" mic: image: mic - tag: v1.8.3 + tag: v1.8.13 # ref: https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical priorityClassName: "" @@ -143,6 +143,12 @@ mic: # cloud configuration used to authenticate with Azure cloudConfig: "/etc/kubernetes/azure.json" + # Configures for a custom cloud per the example here: + # https://azure.github.io/aad-pod-identity/docs/configure/custom_cloud/ + customCloud: + enabled: false + configPath: "/etc/kubernetes/akscustom.json" + # The maximum retry of UpdateUserMSI call. MIC updates all the identities in a batch. If a single identity contains an error # or is invalid, then the entire operation fails. 
Configuring this flag will make MIC retry by removing the erroneous identities # returned in the error @@ -159,7 +165,7 @@ mic: nmi: image: nmi - tag: v1.8.3 + tag: v1.8.13 # ref: https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical priorityClassName: "" @@ -198,7 +204,7 @@ nmi: affinity: {} # nodeAffinity: # preferredDuringSchedulingIgnoredDuringExecution: - # - weight 1 + # - weight: 1 # preference: # matchExpressions: # - key: kubernetes.azure.com/mode @@ -237,8 +243,8 @@ nmi: blockInstanceMetadata: "" # https://github.com/Azure/aad-pod-identity/blob/master/docs/readmes/README.featureflags.md#metadata-header-required-flag - # default is false - metadataHeaderRequired: "" + # default is true + metadataHeaderRequired: true # enable running aad-pod-identity on clusters with kubenet # default is false @@ -251,6 +257,9 @@ nmi: # Set retry-after header in the NMI responses when the identity is still being assigned. setRetryAfterHeader: false + # Enable/Disable deletion of conntrack entries for pre-existing connections to metadata endpoint + enableConntrackDeletion: false + rbac: enabled: true # NMI requires permissions to get secrets when service principal (type: 1) is used in AzureIdentity. 
diff --git a/terraform/helm/argocd_default_values.yaml b/terraform/helm/argocd_default_values.yaml index 8e26b6e2..d67f3549 100644 --- a/terraform/helm/argocd_default_values.yaml +++ b/terraform/helm/argocd_default_values.yaml @@ -1,197 +1,701 @@ -# https://github.com/argoproj/argo-helm/blob/argo-cd-3.17.5/charts/argo-cd/values.yaml +# https://github.com/argoproj/argo-helm/blob/argo-cd-5.13.6/charts/argo-cd/values.yaml -## ArgoCD configuration +## Argo CD configuration ## Ref: https://github.com/argoproj/argo-cd ## + +# -- Provide a name in place of `argocd` nameOverride: argocd +# -- String to fully override `"argo-cd.fullname"` fullnameOverride: "" +# -- Override the Kubernetes version, which is used to evaluate certain manifests kubeVersionOverride: "" +# Override APIVersions +# If you want to template helm charts but cannot access k8s API server +# you can set api versions here +apiVersionOverrides: + # -- String to override apiVersion of cert-manager resources rendered by this helm chart + certmanager: "" # cert-manager.io/v1 + # -- String to override apiVersion of GKE resources rendered by this helm chart + cloudgoogle: "" # cloud.google.com/v1 + # -- String to override apiVersion of autoscaling rendered by this helm chart + autoscaling: "" # autoscaling/v2 + +# -- Create aggregated roles that extend existing cluster roles to interact with argo-cd resources +## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles +createAggregateRoles: false +# -- Create cluster roles for cluster-wide installation. 
+## Used when you manage applications in the same cluster where Argo CD runs +createClusterRoles: true + +openshift: + # -- enables using arbitrary uid for argo repo server + enabled: false +## Custom resource configuration +crds: + # -- Install and upgrade CRDs + install: true + # -- Keep CRDs on chart uninstall + keep: true + # -- Annotations to be added to all CRDs + annotations: {} + +## Globally shared configuration global: + # -- Common labels for the all resources + additionalLabels: {} + # app: argo-cd + + # -- Number of old deployment ReplicaSets to retain. The rest will be garbage collected. + revisionHistoryLimit: 3 + + # Default image used by all components image: + # -- If defined, a repository applied to all Argo CD deployments repository: quay.io/argoproj/argocd - tag: v2.1.1 + # -- Overrides the global Argo CD image tag whose default is the chart appVersion + tag: "" + # -- If defined, a imagePullPolicy applied to all Argo CD deployments imagePullPolicy: IfNotPresent - ## Annotations applied to all pods + + # -- Secrets with credentials to pull images from a private registry + imagePullSecrets: [] + + # Default logging options used by all components + logging: + # -- Set the global logging format. Either: `text` or `json` + format: text + # -- Set the global logging level. One of: `debug`, `info`, `warn` or `error` + level: info + + # -- Annotations for the all deployed Statefulsets + statefulsetAnnotations: {} + + # -- Annotations for the all deployed Deployments + deploymentAnnotations: {} + + # -- Annotations for the all deployed pods podAnnotations: {} - ## Labels applied to all pods + + # -- Labels for the all deployed pods podLabels: {} + + # -- Toggle and define pod-level security context. 
+ # @default -- `{}` (See [values.yaml]) securityContext: {} # runAsUser: 999 # runAsGroup: 999 # fsGroup: 999 - imagePullSecrets: [] + + # -- Mapping between IP and hostnames that will be injected as entries in the pod's hosts files hostAliases: [] # - ip: 10.20.30.40 # hostnames: # - git.myhostname networkPolicy: + # -- Create NetworkPolicy objects for all components create: false + # -- Default deny all ingress traffic defaultDenyIngress: false -# Override APIVersions -# If you want to template helm charts but cannot access k8s API server -# you can set api versions here -apiVersionOverrides: - certmanager: "" # cert-manager.io/v1 - ingress: "" # networking.k8s.io/v1beta1 +## Argo Configs +configs: + # General Argo CD configuration + ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cm.yaml + cm: + # -- Create the argocd-cm configmap for [declarative setup] + create: true -## Create clusterroles that extend existing clusterroles to interact with argo-cd crds -## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles -createAggregateRoles: false + # -- Annotations to be added to argocd-cm configmap + annotations: {} + + # -- Argo CD's externally facing base URL (optional). 
Required when configuring SSO + url: "" + + # -- The name of tracking label used by Argo CD for resource pruning + # @default -- Defaults to app.kubernetes.io/instance + application.instanceLabelKey: argocd.argoproj.io/instance + + # -- Enable logs RBAC enforcement + ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/2.3-2.4/#enable-logs-rbac-enforcement + server.rbac.log.enforce.enable: false + + # -- Enable exec feature in Argo UI + ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/rbac/#exec-resource + exec.enabled: false + + # -- Enable local admin user + ## Ref: https://argo-cd.readthedocs.io/en/latest/faq/#how-to-disable-admin-user + admin.enabled: true + + # -- Timeout to discover if a new manifests version got published to the repository + timeout.reconciliation: 180s + + # -- Timeout to refresh application data as well as target manifests cache + timeout.hard.reconciliation: 0s + + # Dex configuration + # dex.config: | + # connectors: + # # GitHub example + # - type: github + # id: github + # name: GitHub + # config: + # clientID: aabbccddeeff00112233 + # clientSecret: $dex.github.clientSecret # Alternatively $:dex.github.clientSecret + # orgs: + # - name: your-github-org + + # OIDC configuration as an alternative to dex (optional). + # oidc.config: | + # name: AzureAD + # issuer: https://login.microsoftonline.com/TENANT_ID/v2.0 + # clientID: CLIENT_ID + # clientSecret: $oidc.azuread.clientSecret + # rootCA: | + # -----BEGIN CERTIFICATE----- + # ... encoded certificate data here ... 
+ # -----END CERTIFICATE----- + # requestedIDTokenClaims: + # groups: + # essential: true + # requestedScopes: + # - openid + # - profile + # - email + + # Argo CD configuration parameters + ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cmd-params-cm.yaml + params: + # -- Annotations to be added to the argocd-cmd-params-cm ConfigMap + annotations: {} + + ## Generic parameters + # -- Open-Telemetry collector address: (e.g. "otel-collector:4317") + otlp.address: '' + + ## Controller Properties + # -- Number of application status processors + controller.status.processors: 20 + # -- Number of application operation processors + controller.operation.processors: 10 + # -- Specifies timeout between application self heal attempts + controller.self.heal.timeout.seconds: 5 + # -- Repo server RPC call timeout seconds. + controller.repo.server.timeout.seconds: 60 + + ## Server properties + # -- Run server without TLS + server.insecure: false + # -- Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from / + server.basehref: / + # -- Used if Argo CD is running behind reverse proxy under subpath different from / + server.rootpath: '' + # -- Directory path that contains additional static assets + server.staticassets: /shared/app + # -- Disable Argo CD RBAC for user authentication + server.disable.auth: false + # -- Enable GZIP compression + server.enable.gzip: false + # -- Set X-Frame-Options header in HTTP responses to value. To disable, set to "". + server.x.frame.options: sameorigin + + ## Repo-server properties + # -- Limit on number of concurrent manifests generate requests. Any value less the 1 means no limit. + reposerver.parallelism.limit: 0 + + # Argo CD RBAC policy configuration + ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/rbac.md + rbac: + # -- Create the argocd-rbac-cm configmap with ([Argo CD RBAC policy]) definitions. 
+ # If false, it is expected the configmap will be created by something else. + # Argo CD will not work if there is no configmap created with the name above. + create: true + + # -- Annotations to be added to argocd-rbac-cm configmap + annotations: {} + + # -- The name of the default role which Argo CD will falls back to, when authorizing API requests (optional). + # If omitted or empty, users may be still be able to login, but will see no apps, projects, etc... + policy.default: '' + + # -- File containing user-defined policies and role definitions. + # @default -- `''` (See [values.yaml]) + policy.csv: '' + # Policy rules are in the form: + # p, subject, resource, action, object, effect + # Role definitions and bindings are in the form: + # g, subject, inherited-subject + # policy.csv | + # p, role:org-admin, applications, *, */*, allow + # p, role:org-admin, clusters, get, *, allow + # p, role:org-admin, repositories, *, *, allow + # p, role:org-admin, logs, get, *, allow + # p, role:org-admin, exec, create, */*, allow + # g, your-github-org:your-team, role:org-admin + + # -- OIDC scopes to examine during rbac enforcement (in addition to `sub` scope). + # The scope value can be a string, or a list of strings. + scopes: "[groups]" + + # GnuPG public keys for commit verification + ## Ref: https://argo-cd.readthedocs.io/en/stable/user-guide/gpg-verification/ + gpg: + # -- Annotations to be added to argocd-gpg-keys-cm configmap + annotations: {} + + # -- [GnuPG] public keys to add to the keyring + # @default -- `{}` (See [values.yaml]) + ## Note: Public keys should be exported with `gpg --export --armor ` + keys: {} + # 4AEE18F83AFDEB23: | + # -----BEGIN PGP PUBLIC KEY BLOCK----- + # ... 
+ # -----END PGP PUBLIC KEY BLOCK----- + + # SSH known hosts for Git repositories + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#ssh-known-host-public-keys + ssh: + # -- Annotations to be added to argocd-ssh-known-hosts-cm configmap + annotations: {} + + # -- Known hosts to be added to the known host list by default. + # @default -- See [values.yaml] + knownHosts: | + bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw== + github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== + gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= + gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf + gitlab.com ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 + ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H + vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H + + # -- Additional known hosts for private repositories + extraHosts: '' + + # Repository TLS certificates + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#repositories-using-self-signed-tls-certificates-or-are-signed-by-custom-ca + tls: + # -- Annotations to be added to argocd-tls-certs-cm configmap + annotations: {} + + # -- TLS certificates for Git repositories + # @default -- `{}` (See [values.yaml]) + certificates: {} + # server.example.com: | + # -----BEGIN CERTIFICATE----- + # ... 
+ # -----END CERTIFICATE----- + + # -- Provide one or multiple [external cluster credentials] + # @default -- `[]` (See [values.yaml]) + ## Ref: + ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#clusters + ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/security/#external-cluster-credentials + clusterCredentials: [] + # - name: mycluster + # server: https://mycluster.com + # labels: {} + # annotations: {} + # config: + # bearerToken: "" + # tlsClientConfig: + # insecure: false + # caData: "" + # - name: mycluster2 + # server: https://mycluster2.com + # labels: {} + # annotations: {} + # namespaces: namespace1,namespace2 + # clusterResources: true + # config: + # bearerToken: "" + # tlsClientConfig: + # insecure: false + # caData: "" + + # DEPRECATED - Moved to configs.ssh.annotations + # knownHostsAnnotations: {} + # DEPRECATED - Moved to configs.ssh.knownHosts + # knownHosts: {} + + # DEPRECATED - Moved to configs.tls.annotations + # tlsCertsAnnotations: {} + # DEPRECATED - Moved to configs.tls.certificates + # tlsCerts: {} + + # -- Repository credentials to be used as Templates for other repos + ## Creates a secret for each key/value specified below to create repository credentials + credentialTemplates: {} + # github-enterprise-creds-1: + # url: https://github.com/argoproj + # githubAppID: 1 + # githubAppInstallationID: 2 + # githubAppEnterpriseBaseUrl: https://ghe.example.com/api/v3 + # githubAppPrivateKey: | + # -----BEGIN OPENSSH PRIVATE KEY----- + # ... + # -----END OPENSSH PRIVATE KEY----- + # https-creds: + # url: https://github.com/argoproj + # password: my-password + # username: my-username + # ssh-creds: + # url: git@github.com:argoproj-labs + # sshPrivateKey: | + # -----BEGIN OPENSSH PRIVATE KEY----- + # ... 
+ # -----END OPENSSH PRIVATE KEY----- + + # -- Annotations to be added to `configs.credentialTemplates` Secret + credentialTemplatesAnnotations: {} + + # -- Repositories list to be used by applications + ## Creates a secret for each key/value specified below to create repositories + ## Note: the last example in the list would use a repository credential template, configured under "configs.repositoryCredentials". + repositories: {} + # istio-helm-repo: + # url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts + # name: istio.io + # type: helm + # private-helm-repo: + # url: https://my-private-chart-repo.internal + # name: private-repo + # type: helm + # password: my-password + # username: my-username + # private-repo: + # url: https://github.com/argoproj/private-repo + + # -- Annotations to be added to `configs.repositories` Secret + repositoriesAnnotations: {} + + # Argo CD sensitive data + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sensitive-data-and-sso-client-secrets + secret: + # -- Create the argocd-secret + createSecret: true + # -- Labels to be added to argocd-secret + labels: {} + # -- Annotations to be added to argocd-secret + annotations: {} + + # -- Shared secret for authenticating GitHub webhook events + githubSecret: "" + # -- Shared secret for authenticating GitLab webhook events + gitlabSecret: "" + # -- Shared secret for authenticating BitbucketServer webhook events + bitbucketServerSecret: "" + # -- UUID for authenticating Bitbucket webhook events + bitbucketUUID: "" + # -- Shared secret for authenticating Gogs webhook events + gogsSecret: "" + + # -- add additional secrets to be added to argocd-secret + ## Custom secrets. Useful for injecting SSO secrets into environment variables. + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sensitive-data-and-sso-client-secrets + ## Note that all values must be non-empty. 
+ extra: + {} + # LDAP_PASSWORD: "mypassword" + + # -- Argo TLS Data + # DEPRECATED - Use server.certificate or server.certificateSecret + # argocdServerTlsConfig: + # key: '' + # crt: '' + + # -- Bcrypt hashed admin password + ## Argo expects the password in the secret to be bcrypt hashed. You can create this hash with + ## `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'` + argocdServerAdminPassword: "" + # -- Admin password modification time. Eg. `"2006-01-02T15:04:05Z"` + # @default -- `""` (defaults to current time) + argocdServerAdminPasswordMtime: "" + + # -- Define custom [CSS styles] for your argo instance. + # This setting will automatically mount the provided CSS and reference it in the argo configuration. + # @default -- `""` (See [values.yaml]) + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/custom-styles/ + styles: "" + # styles: | + # .nav-bar { + # background: linear-gradient(to bottom, #999, #777, #333, #222, #111); + # } -## Controller +# -- Array of extra K8s manifests to deploy +## Note: Supports use of custom Helm templates +extraObjects: [] + # - apiVersion: secrets-store.csi.x-k8s.io/v1 + # kind: SecretProviderClass + # metadata: + # name: argocd-secrets-store + # spec: + # provider: aws + # parameters: + # objects: | + # - objectName: "argocd" + # objectType: "secretsmanager" + # jmesPath: + # - path: "client_id" + # objectAlias: "client_id" + # - path: "client_secret" + # objectAlias: "client_secret" + # secretObjects: + # - data: + # - key: client_id + # objectName: client_id + # - key: client_secret + # objectName: client_secret + # secretName: argocd-secrets-store + # type: Opaque + # labels: + # app.kubernetes.io/part-of: argocd + +## Application controller controller: + # -- Application controller name string name: application-controller - image: - repository: # defaults to global.image.repository - tag: # defaults to global.image.tag - imagePullPolicy: # IfNotPresent - - # If changing the number of 
replicas you must pass the number as ARGOCD_CONTROLLER_REPLICAS as an environment variable + # -- The number of application controller pods to run. + # Additional replicas will cause sharding of managed clusters across number of replicas. replicas: 1 - # Deploy the application as a StatefulSet instead of a Deployment, this is required for HA capability. - # This is a feature flag that will become the default in chart version 3.x - enableStatefulSet: false + ## Application controller Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the application controller + enabled: false + # -- Labels to be added to application controller pdb + labels: {} + # -- Annotations to be added to application controller pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailble after eviction as number or percentage (eg.: 50%). 
+ ## Has higher precedence over `controller.pdb.minAvailable` + maxUnavailable: "" + + ## Application controller image + image: + # -- Repository to use for the application controller + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the application controller + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the application controller + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] - ## Argo controller commandline flags - args: - statusProcessors: "20" - operationProcessors: "10" - appResyncPeriod: "180" - selfHealTimeout: "5" - repoServerTimeoutSeconds: "60" - - ## Argo controller log format: text|json - logFormat: text - ## Argo controller log level - logLevel: info - - ## Additional command line arguments to pass to argocd-controller - ## + # -- DEPRECATED - Application controller commandline flags + args: {} + # DEPRECATED - Use configs.params to override + # # -- define the application controller `--status-processors` + # statusProcessors: "20" + # # -- define the application controller `--operation-processors` + # operationProcessors: "10" + # # -- define the application controller `--app-hard-resync` + # appHardResyncPeriod: "0" + # # -- define the application controller `--app-resync` + # appResyncPeriod: "180" + # # -- define the application controller `--self-heal-timeout-seconds` + # selfHealTimeout: "5" + # # -- define the application controller `--repo-server-timeout-seconds` + # repoServerTimeoutSeconds: "60" + + # -- Additional command line arguments to pass to application controller extraArgs: [] - ## Environment variables to pass to argocd-controller - ## - env: - [] - # - name: "ARGOCD_CONTROLLER_REPLICAS" - # value: "" + # -- Environment variables to pass to 
application controller + env: [] - ## envFrom to pass to argocd-controller - ## + # -- envFrom to pass to application controller + # @default -- `[]` (See [values.yaml]) envFrom: [] # - configMapRef: # name: config-map-name # - secretRef: # name: secret-name - ## Annotations to be added to controller pods - ## + # -- Additional containers to be added to the application controller pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + + # -- Init containers to add to the application controller pod + ## If your target Kubernetes cluster(s) require a custom credential (exec) plugin + ## you could use this (and the same in the server pod) to provide such executable + ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins + ## Note: Supports use of custom Helm templates + initContainers: [] + # - name: download-tools + # image: alpine:3 + # command: [sh, -c] + # args: + # - wget -qO kubelogin.zip https://github.com/Azure/kubelogin/releases/download/v0.0.25/kubelogin-linux-amd64.zip && + # unzip kubelogin.zip && mv bin/linux_amd64/kubelogin /custom-tools/ + # volumeMounts: + # - mountPath: /custom-tools + # name: custom-tools + + # -- Additional volumeMounts to the application controller main container + volumeMounts: [] + # - mountPath: /usr/local/bin/kubelogin + # name: custom-tools + # subPath: kubelogin + + # -- Additional volumes to the application controller pod + volumes: [] + # - name: custom-tools + # emptyDir: {} + + # -- Annotations for the application controller StatefulSet + statefulsetAnnotations: {} + + # -- Annotations to be added to application controller pods podAnnotations: {} - ## Labels to be added to controller pods - ## + # -- Labels to be added to application controller pods podLabels: {} - ## Labels to set container specific security contexts - containerSecurityContext: - {} - # capabilities: - # drop: - # - all - # readOnlyRootFilesystem: true - # runAsNonRoot: true + # 
-- Resource limits and requests for the application controller pods + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi - ## Configures the controller port - containerPort: 8082 + # Application controller container ports + containerPorts: + # -- Metrics container port + metrics: 8082 - ## Readiness and liveness probes for default backend + # -- Application controller container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # Rediness probe for application controller ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ - ## readinessProbe: + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed successThreshold: 1 + # -- Number of seconds after which the [probe] times out timeoutSeconds: 1 - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - - ## Additional volumeMounts to the controller main container. - volumeMounts: [] - - ## Additional volumes to the controller pod. 
- volumes: [] - - ## Controller service configuration - service: - annotations: {} - labels: {} - port: 8082 - portName: https-controller - ## Node selectors and tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## + # -- [Node selector] nodeSelector: {} + + # -- [Tolerations] for use with node taints tolerations: [] + + # -- Assign custom [affinity] rules to the deployment affinity: {} - priorityClassName: "" + # -- Assign custom [TopologySpreadConstraints] rules to the application controller + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule - resources: {} - # limits: - # cpu: 500m - # memory: 512Mi - # requests: - # cpu: 250m - # memory: 256Mi + # -- Priority class for the application controller pods + priorityClassName: "" serviceAccount: + # -- Create a service account for the application controller create: true + # -- Service account name name: argocd-application-controller - ## Annotations applied to created service account + # -- Annotations applied to created service account annotations: {} - ## Automount API credentials for the Service Account + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account automountServiceAccountToken: true - ## Server metrics controller configuration + ## Application controller metrics configuration metrics: + # -- Deploy metrics service enabled: false + applicationLabels: + # -- Enables additional labels in argocd_app_labels metric + enabled: false + # -- Additional labels + labels: [] service: + # -- Metrics service annotations annotations: {} + # -- Metrics service labels labels: {} + # -- Metrics service 
port servicePort: 8082 + # -- Metrics service port name + portName: http-metrics serviceMonitor: + # -- Enable a prometheus ServiceMonitor enabled: false + # -- Prometheus ServiceMonitor interval interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion metricRelabelings: [] - # selector: - # prometheus: kube-prometheus - # namespace: monitoring - # additionalLabels: {} + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # "monitoring" + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} rules: + # -- Deploy a PrometheusRule for the application controller enabled: false + # -- PrometheusRule.Spec for the application controller spec: [] # - alert: ArgoAppMissing # expr: | - # absent(argocd_app_info) + # absent(argocd_app_info) == 1 # for: 15m # labels: # severity: critical # annotations: - # summary: "[ArgoCD] No reported applications" + # summary: "[Argo CD] No reported applications" # description: > - # ArgoCD has not reported any applications data for the past 15 minutes which + # Argo CD has not reported any applications data for the past 15 minutes which # means that it must be down or not functioning properly. This needs to be # resolved for this cloud to continue to maintain state. # - alert: ArgoAppNotSynced @@ -210,420 +714,955 @@ controller: # prometheus: kube-prometheus # namespace: monitoring # additionalLabels: {} + # annotations: {} - ## Enable Admin ClusterRole resources. - ## Enable if you would like to grant rights to ArgoCD to deploy to the local Kubernetes cluster. 
- clusterAdminAccess: - enabled: true - ## Enable Custom Rules for the Application Controller's Cluster Role resource ## Enable this and set the rules: to whatever custom rules you want for the Cluster Role resource. ## Defaults to off clusterRoleRules: + # -- Enable custom rules for the application controller's ClusterRole resource enabled: false + # -- List of custom rules for the application controller's ClusterRole resource rules: [] - ## Dex dex: + # -- Enable dex enabled: true + # -- Dex name name: dex-server + # -- Additional command line arguments to pass to the Dex server + extraArgs: [] + metrics: + # -- Deploy metrics service enabled: false service: + # -- Metrics service annotations annotations: {} + # -- Metrics service labels labels: {} + # -- Metrics service port name + portName: http-metrics serviceMonitor: + # -- Enable a prometheus ServiceMonitor enabled: false + # -- Prometheus ServiceMonitor interval interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion metricRelabelings: [] - # selector: - # prometheus: kube-prometheus - # namespace: monitoring - # additionalLabels: {} + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # "monitoring" + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + ## Dex Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the Dex server + enabled: false + # -- Labels to be added to Dex server pdb + labels: {} + # -- Annotations to be added to Dex server pdb + annotations: {} + # -- Number of pods that are available after 
eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailble after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `dex.pdb.minAvailable` + maxUnavailable: "" + + ## Dex image image: + # -- Dex image repository repository: ghcr.io/dexidp/dex - tag: v2.30.0 - imagePullPolicy: IfNotPresent - initImage: - repository: - tag: - imagePullPolicy: + # -- Dex image tag + tag: v2.35.3 + # -- Dex imagePullPolicy + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] - ## Environment variables to pass to the Dex server - ## + # Argo CD init image that creates Dex config + initImage: + # -- Argo CD init image repository + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Argo CD init image tag + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Argo CD init image imagePullPolicy + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Environment variables to pass to the Dex server env: [] - ## envFrom to pass to the Dex server + # -- envFrom to pass to the Dex server + # @default -- `[]` (See [values.yaml]) envFrom: [] # - configMapRef: # name: config-map-name # - secretRef: # name: secret-name - ## Annotations to be added to the Dex server pods - ## + # -- Additional containers to be added to the dex pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + + # -- Init containers to add to the dex pod + ## Note: Supports use of custom Helm templates + initContainers: [] + + # -- Additional volumeMounts to the dex main container + volumeMounts: [] + + # -- Additional volumes to the dex pod + volumes: [] + + # TLS certificate configuration via Secret + ## Ref: 
https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#configuring-tls-to-argocd-dex-server + ## Note: Issuing certificates via cert-manager in not supported right now because it's not possible to restart Dex automatically without extra controllers. + certificateSecret: + # -- Create argocd-dex-server-tls secret + enabled: false + # -- Labels to be added to argocd-dex-server-tls secret + labels: {} + # -- Annotations to be added to argocd-dex-server-tls secret + annotations: {} + # -- Certificate authority. Required for self-signed certificates. + ca: '' + # -- Certificate private key + key: '' + # -- Certificate data. Must contain SANs of Dex service (ie: argocd-dex-server, argocd-dex-server.argo-cd.svc) + crt: '' + + # -- Annotations to be added to the Dex server Deployment + deploymentAnnotations: {} + + # -- Annotations to be added to the Dex server pods podAnnotations: {} - ## Labels to be added to the Dex server pods - ## + # -- Labels to be added to the Dex server pods podLabels: {} - ## Probes for Dex server - ## Supported from Dex >= 2.28.0 - livenessProbe: - enabled: false - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 + # -- Resource limits and requests for dex + resources: {} + # limits: + # cpu: 50m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + + # Dex container ports + # NOTE: These ports are currently hardcoded and cannot be changed + containerPorts: + # -- HTTP container port + http: 5556 + # -- gRPC container port + grpc: 5557 + # -- Metrics container port + metrics: 5558 + + # -- Dex container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + ## Probes for Dex server + ## Supported from Dex >= 2.28.0 + livenessProbe: + # -- Enable Kubernetes liveness probe for Dex >= 
2.28.0 + enabled: false + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + readinessProbe: + # -- Enable Kubernetes readiness probe for Dex >= 2.28.0 enabled: false + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed successThreshold: 1 + # -- Number of seconds after which the [probe] times out timeoutSeconds: 1 serviceAccount: + # -- Create dex service account create: true + # -- Dex service account name name: argocd-dex-server - ## Annotations applied to created service account + # -- Annotations applied to created service account annotations: {} - ## Automount API credentials for the Service Account + # -- Automount API credentials for the Service Account automountServiceAccountToken: true - ## Additional volumeMounts to the controller main container. - volumeMounts: - - name: static-files - mountPath: /shared - - ## Additional volumes to the controller pod. 
- volumes: - - name: static-files - emptyDir: {} - - ## Dex deployment container ports - containerPortHttp: 5556 + # -- Service port for HTTP access servicePortHttp: 5556 + # -- Service port name for HTTP access servicePortHttpName: http - containerPortGrpc: 5557 + # -- Service port for gRPC access servicePortGrpc: 5557 + # -- Service port name for gRPC access servicePortGrpcName: grpc - containerPortMetrics: 5558 + # -- Service port for metrics access servicePortMetrics: 5558 - ## Node selectors and tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## + # -- [Node selector] nodeSelector: {} + # -- [Tolerations] for use with node taints tolerations: [] + # -- Assign custom [affinity] rules to the deployment affinity: {} - priorityClassName: "" - - ## Labels to set container specific security contexts - containerSecurityContext: - {} - # capabilities: - # drop: - # - all - # readOnlyRootFilesystem: true + # -- Assign custom [TopologySpreadConstraints] rules to dex + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule - resources: {} - # limits: - # cpu: 50m - # memory: 64Mi - # requests: - # cpu: 10m - # memory: 32Mi + # -- Priority class for dex + priorityClassName: "" ## Redis redis: + # -- Enable redis enabled: true + # -- Redis name name: redis + ## Redis Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the Redis + enabled: false + # -- Labels to be added to Redis pdb + labels: {} + # -- Annotations to be added to Redis pdb + annotations: {} + # -- Number of pods that are available after eviction as number or 
percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailble after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `redis.pdb.minAvailable` + maxUnavailable: "" + + ## Redis image image: - repository: redis - tag: 6.2.4-alpine - imagePullPolicy: IfNotPresent + # -- Redis repository + repository: public.ecr.aws/docker/library/redis + # -- Redis tag + tag: 7.0.7-alpine + # -- Redis image pull policy + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + ## Prometheus redis-exporter sidecar + exporter: + # -- Enable Prometheus redis-exporter sidecar + enabled: false + ## Prometheus redis-exporter image + image: + # -- Repository to use for the redis-exporter + repository: public.ecr.aws/bitnami/redis-exporter + # -- Tag to use for the redis-exporter + tag: 1.45.0 + # -- Image pull policy for the redis-exporter + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Redis exporter security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # -- Resource limits and requests for redis-exporter sidecar + resources: {} + # limits: + # cpu: 50m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] - ## Additional command line arguments to pass to redis-server - ## + # -- Additional command line arguments to pass to redis-server extraArgs: [] # - --bind # - "0.0.0.0" - containerPort: 6379 - servicePort: 6379 - - ## Environment variables to pass to the Redis server - ## + # -- Environment variables to pass to the Redis server env: [] - ## envFrom to pass to the 
Redis server - ## + # -- envFrom to pass to the Redis server + # @default -- `[]` (See [values.yaml]) envFrom: [] # - configMapRef: # name: config-map-name # - secretRef: # name: secret-name - ## Annotations to be added to the Redis server pods - ## + # -- Additional containers to be added to the redis pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + + # -- Init containers to add to the redis pod + ## Note: Supports use of custom Helm templates + initContainers: [] + + # -- Additional volumeMounts to the redis container + volumeMounts: [] + + # -- Additional volumes to the redis pod + volumes: [] + + # -- Annotations to be added to the Redis server Deployment + deploymentAnnotations: {} + + # -- Annotations to be added to the Redis server pods podAnnotations: {} - ## Labels to be added to the Redis server pods - ## + # -- Labels to be added to the Redis server pods podLabels: {} - ## Node selectors and tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## + # -- Resource limits and requests for redis + resources: {} + # limits: + # cpu: 200m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 64Mi + + # -- Redis pod-level security context + # @default -- See [values.yaml] + securityContext: + runAsNonRoot: true + runAsUser: 999 + seccompProfile: + type: RuntimeDefault + + # Redis container ports + containerPorts: + # -- Redis container port + redis: 6379 + # -- Metrics container port + metrics: 9121 + + # -- Redis container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + + # -- Redis service port + servicePort: 6379 + + # -- [Node selector] nodeSelector: {} + + # -- [Tolerations] for use with node taints tolerations: [] - affinity: {} - priorityClassName: "" + # -- Assign custom [affinity] rules to the deployment + affinity: {} - ## Labels to set 
container specific security contexts - containerSecurityContext: - {} - # capabilities: - # drop: - # - all - # readOnlyRootFilesystem: true + # -- Assign custom [TopologySpreadConstraints] rules to redis + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule - ## Redis Pod specific security context - securityContext: - runAsNonRoot: true - runAsUser: 999 + # -- Priority class for redis + priorityClassName: "" serviceAccount: + # -- Create a service account for the redis pod create: false + # -- Service account name for redis pod name: "" - ## Annotations applied to created service account + # -- Annotations applied to created service account annotations: {} - ## Automount API credentials for the Service Account + # -- Automount API credentials for the Service Account automountServiceAccountToken: false - resources: {} - # limits: - # cpu: 200m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 64Mi + service: + # -- Redis service annotations + annotations: {} + # -- Additional redis service labels + labels: {} - volumeMounts: [] - volumes: [] + metrics: + # -- Deploy metrics service + enabled: false + + # Redis metrics service configuration + service: + # -- Metrics service type + type: ClusterIP + # -- Metrics service clusterIP. 
`None` makes a "headless service" (no virtual IP) + clusterIP: None + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port + servicePort: 9121 + # -- Metrics service port name + portName: http-metrics + + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Interval at which metrics should be scraped + interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # "monitoring" + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} # This key configures Redis-HA subchart and when enabled (redis-ha.enabled=true) # the custom redis deployment is omitted +# Check the redis-ha chart for more properties redis-ha: + # -- Enables the Redis HA subchart and disables the custom Redis single node deployment enabled: false - # Check the redis-ha chart for more properties + ## Prometheus redis-exporter sidecar exporter: - enabled: true + # -- Enable Prometheus redis-exporter sidecar + enabled: false + # -- Repository to use for the redis-exporter + image: public.ecr.aws/bitnami/redis-exporter + # -- Tag to use for the redis-exporter + tag: 1.45.0 persistentVolume: + # -- Configures persistency on Redis nodes enabled: false redis: + # -- Redis convention for naming the cluster group: must match `^[\\w-\\.]+$` and can be templated masterGroupName: argocd + # -- Any valid redis config options in this section will be applied to each server (see `redis-ha` chart) + # @default -- See [values.yaml] config: + # -- Will 
save the DB if both the given number of seconds and the given number of write operations against the DB occurred. `""` is disabled + # @default -- `'""'` save: '""' haproxy: + # -- Enabled HAProxy LoadBalancing/Proxy enabled: true metrics: + # -- HAProxy enable prometheus metric scraping enabled: true image: - tag: 6.2.4-alpine + # -- Redis tag + tag: 7.0.7-alpine + + ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + topologySpreadConstraints: + # -- Enable Redis HA topology spread constraints + enabled: false + # -- Max skew of pods tolerated + # @default -- `""` (defaults to `1`) + maxSkew: "" + # -- Topology key for spread + # @default -- `""` (defaults to `topology.kubernetes.io/zone`) + topologyKey: "" + # -- Enforcement policy, hard or soft + # @default -- `""` (defaults to `ScheduleAnyway`) + whenUnsatisfiable: "" + +# External Redis parameters +externalRedis: + # -- External Redis server host + host: "" + # -- External Redis username + username: "" + # -- External Redis password + password: "" + # -- External Redis server port + port: 6379 + # -- The name of an existing secret with Redis credentials (must contain key `redis-password`). 
+ # When it's set, the `externalRedis.password` parameter is ignored + existingSecret: "" + # -- External Redis Secret annotations + secretAnnotations: {} ## Server server: + # -- Argo CD server name name: server + # -- The number of server pods to run replicas: 1 + ## Argo CD server Horizontal Pod Autoscaler autoscaling: + # -- Enable Horizontal Pod Autoscaler ([HPA]) for the Argo CD server enabled: false + # -- Minimum number of replicas for the Argo CD server [HPA] minReplicas: 1 + # -- Maximum number of replicas for the Argo CD server [HPA] maxReplicas: 5 + # -- Average CPU utilization percentage for the Argo CD server [HPA] targetCPUUtilizationPercentage: 50 + # -- Average memory utilization percentage for the Argo CD server [HPA] targetMemoryUtilizationPercentage: 50 - + # -- Configures the scaling behavior of the target in both Up and Down directions. + # This is only available on HPA apiVersion `autoscaling/v2beta2` and newer + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + ## Argo CD server Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the Argo CD server + enabled: false + # -- Labels to be added to Argo CD server pdb + labels: {} + # -- Annotations to be added to Argo CD server pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailble after eviction as number or percentage (eg.: 50%). 
+ ## Has higher precedence over `server.pdb.minAvailable` + maxUnavailable: "" + + ## Argo CD server image image: - repository: # defaults to global.image.repository - tag: # defaults to global.image.tag - imagePullPolicy: # IfNotPresent + # -- Repository to use for the Argo CD server + # @default -- `""` (defaults to global.image.repository) + repository: "" # defaults to global.image.repository + # -- Tag to use for the Argo CD server + # @default -- `""` (defaults to global.image.tag) + tag: "" # defaults to global.image.tag + # -- Image pull policy for the Argo CD server + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" # IfNotPresent + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] - ## Additional command line arguments to pass to argocd-server - ## + # -- Additional command line arguments to pass to Argo CD server extraArgs: [] - # - --insecure - - # This flag is used to either remove or pass the CLI flag --staticassets /shared/app to the argocd-server app - staticAssets: - enabled: true - ## Environment variables to pass to argocd-server - ## + # -- Environment variables to pass to Argo CD server env: [] - ## envFrom to pass to argocd-server - ## + # -- envFrom to pass to Argo CD server + # @default -- `[]` (See [values.yaml]) envFrom: [] # - configMapRef: # name: config-map-name # - secretRef: # name: secret-name - ## Specify postStart and preStop lifecycle hooks for your argo-cd-server container - ## + # -- Specify postStart and preStop lifecycle hooks for your argo-cd-server container lifecycle: {} - ## Argo server log format: text|json - logFormat: text - ## Argo server log level - logLevel: info + ## Argo UI extensions + ## This function in tech preview stage, do expect unstability or breaking changes in newer versions. 
+ ## Ref: https://github.com/argoproj-labs/argocd-extensions + extensions: + # -- Enable support for Argo UI extensions + enabled: false + + ## Argo UI extensions image + image: + # -- Repository to use for extensions image + repository: "ghcr.io/argoproj-labs/argocd-extensions" + # -- Tag to use for extensions image + tag: "v0.2.1" + # -- Image pull policy for extensions + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Server UI extensions container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # -- Resource limits and requests for the argocd-extensions container + resources: {} + # limits: + # cpu: 50m + # memory: 128Mi + # requests: + # cpu: 10m + # memory: 64Mi + + # -- Additional containers to be added to the server pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Init containers to add to the server pod + ## If your target Kubernetes cluster(s) require a custom credential (exec) plugin + ## you could use this (and the same in the application controller pod) to provide such executable + ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins + initContainers: [] + # - name: download-tools + # image: 
alpine:3 + # command: [sh, -c] + # args: + # - wget -qO kubelogin.zip https://github.com/Azure/kubelogin/releases/download/v0.0.25/kubelogin-linux-amd64.zip && + # unzip kubelogin.zip && mv bin/linux_amd64/kubelogin /custom-tools/ + # volumeMounts: + # - mountPath: /custom-tools + # name: custom-tools + + # -- Additional volumeMounts to the server main container + volumeMounts: [] + # - mountPath: /usr/local/bin/kubelogin + # name: custom-tools + # subPath: kubelogin + + # -- Additional volumes to the server pod + volumes: [] + # - name: custom-tools + # emptyDir: {} + + # -- Annotations to be added to server Deployment + deploymentAnnotations: {} - ## Annotations to be added to controller pods - ## + # -- Annotations to be added to server pods podAnnotations: {} - ## Labels to be added to controller pods - ## + # -- Labels to be added to server pods podLabels: {} - ## Configures the server port - containerPort: 8080 + # -- Resource limits and requests for the Argo CD server + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 50m + # memory: 64Mi + + # Server container ports + containerPorts: + # -- Server container port + server: 8080 + # -- Metrics container port + metrics: 8082 + + # -- Server container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL ## Readiness and liveness probes for default backend ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ - ## readinessProbe: + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] periodSeconds: 10 + # -- Minimum 
consecutive successes for the [probe] to be considered successful after having failed successThreshold: 1 + # -- Number of seconds after which the [probe] times out timeoutSeconds: 1 + livenessProbe: + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed successThreshold: 1 + # -- Number of seconds after which the [probe] times out timeoutSeconds: 1 - ## Additional volumeMounts to the server main container. - volumeMounts: [] - - ## Additional volumes to the controller pod. - volumes: [] - - ## Node selectors and tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## + # -- [Node selector] nodeSelector: {} + # -- [Tolerations] for use with node taints tolerations: [] + # -- Assign custom [affinity] rules to the deployment affinity: {} - priorityClassName: "" - - ## Labels to set container specific security contexts - containerSecurityContext: - {} - # capabilities: - # drop: - # - all - # readOnlyRootFilesystem: true + # -- Assign custom [TopologySpreadConstraints] rules to the Argo CD server + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule - resources: {} - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 50m - # memory: 64Mi + # -- Priority class for the Argo CD server + priorityClassName: "" - ## Certificate configuration + # TLS certificate 
configuration via cert-manager + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-certificates-used-by-argocd-server certificate: + # -- Deploy a Certificate resource (requires cert-manager) enabled: false + # -- The name of the Secret that will be automatically created and managed by this Certificate resource + secretName: argocd-server-tls + # -- Certificate primary domain (commonName) domain: argocd.example.com - issuer: - kind: # ClusterIssuer - name: # letsencrypt + # -- Certificate Subject Alternate Names (SANs) additionalHosts: [] - secretName: argocd-server-tls + # -- The requested 'duration' (i.e. lifetime) of the certificate. + # @default -- `""` (defaults to 2160h = 90d if not specified) + ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal + duration: "" + # -- How long before the expiry a certificate should be renewed. + # @default -- `""` (defaults to 360h = 15d if not specified) + ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal + renewBefore: "" + # Certificate issuer + ## Ref: https://cert-manager.io/docs/concepts/issuer + issuer: + # -- Certificate issuer group. Set if using an external issuer. Eg. `cert-manager.io` + group: "" + # -- Certificate issuer kind. Either `Issuer` or `ClusterIssuer` + kind: "" + # -- Certificate isser name. Eg. `letsencrypt` + name: "" + # Private key of the certificate + privateKey: + # -- Rotation policy of private key when certificate is re-issued. Either: `Never` or `Always` + rotationPolicy: Never + # -- The private key cryptography standards (PKCS) encoding for private key. Either: `PCKS1` or `PKCS8` + encoding: PKCS1 + # -- Algorithm used to generate certificate private key. One of: `RSA`, `Ed25519` or `ECDSA` + algorithm: RSA + # -- Key bit size of the private key. If algorithm is set to `Ed25519`, size is ignored. 
+ size: 2048 + + # TLS certificate configuration via Secret + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-certificates-used-by-argocd-server + certificateSecret: + # -- Create argocd-server-tls secret + enabled: false + # -- Annotations to be added to argocd-server-tls secret + annotations: {} + # -- Labels to be added to argocd-server-tls secret + labels: {} + # -- Private Key of the certificate + key: '' + # -- Certificate data + crt: '' ## Server service configuration service: + # -- Server service annotations annotations: {} + # -- Server service labels labels: {} + # -- Server service type type: ClusterIP - ## For node port default ports + # -- Server service http port for NodePort service type (only if `server.service.type` is set to "NodePort") nodePortHttp: 30080 + # -- Server service https port for NodePort service type (only if `server.service.type` is set to "NodePort") nodePortHttps: 30443 + # -- Server service http port servicePortHttp: 80 + # -- Server service https port servicePortHttps: 443 + # -- Server service http port name, can be used to route traffic via istio servicePortHttpName: http + # -- Server service https port name, can be used to route traffic via istio servicePortHttpsName: https - namedTargetPort: true + # -- LoadBalancer will get created with the IP specified in this field loadBalancerIP: "" + # -- Source IP ranges to allow access to service from loadBalancerSourceRanges: [] + # -- Server service external IPs externalIPs: [] + # -- Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints externalTrafficPolicy: "" + # -- Used to maintain session affinity. 
Supports `ClientIP` and `None` sessionAffinity: "" ## Server metrics service configuration metrics: + # -- Deploy metrics service enabled: false service: + # -- Metrics service annotations annotations: {} + # -- Metrics service labels labels: {} + # -- Metrics service port servicePort: 8083 + # -- Metrics service port name + portName: http-metrics serviceMonitor: + # -- Enable a prometheus ServiceMonitor enabled: false + # -- Prometheus ServiceMonitor interval interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion metricRelabelings: [] - # selector: - # prometheus: kube-prometheus - # namespace: monitoring - # additionalLabels: {} + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # monitoring + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} serviceAccount: + # -- Create server service account create: true + # -- Server service account name name: argocd-server - ## Annotations applied to created service account + # -- Annotations applied to created service account annotations: {} - ## Automount API credentials for the Service Account + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account automountServiceAccountToken: true ingress: + # -- Enable an ingress resource for the Argo CD server enabled: false + # -- Additional ingress annotations annotations: {} + # -- Additional ingress labels labels: {} + # -- Defines which ingress controller will implement the resource ingressClassName: "" + # -- List of ingress hosts ## Argo Ingress. ## Hostnames must be provided if Ingress is enabled. 
## Secrets must be manually created in the namespace - ## - hosts: - [] + hosts: [] # - argocd.example.com + + # -- List of ingress paths paths: - / + # -- Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific` pathType: Prefix - extraPaths: - [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - ## for Kubernetes >=1.19 (when "networking.k8s.io/v1" is used) + # -- Additional ingress paths + extraPaths: [] # - path: /* # pathType: Prefix # backend: @@ -631,48 +1670,57 @@ server: # name: ssl-redirect # port: # name: use-annotation - tls: - [] - # - secretName: argocd-tls-certificate + + # -- Ingress TLS configuration + tls: [] + # - secretName: your-certificate-name # hosts: # - argocd.example.com + + # -- Uses `server.service.servicePortHttps` instead `server.service.servicePortHttp` https: false + # dedicated ingress for gRPC as documented at - # https://argoproj.github.io/argo-cd/operator-manual/ingress/ + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/ ingressGrpc: + # -- Enable an ingress resource for the Argo CD server for dedicated [gRPC-ingress] enabled: false + # -- Setup up gRPC ingress to work with an AWS ALB isAWSALB: false + # -- Additional ingress annotations for dedicated [gRPC-ingress] annotations: {} + # -- Additional ingress labels for dedicated [gRPC-ingress] labels: {} + # -- Defines which ingress controller will implement the resource [gRPC-ingress] ingressClassName: "" awsALB: + # -- Service type for the AWS ALB gRPC service ## Service Type if isAWSALB is set to true ## Can be of type NodePort or ClusterIP depending on which mode you are ## are running. Instance mode needs type NodePort, IP mode needs type ## ClusterIP ## Ref: https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/how-it-works/#ingress-traffic serviceType: NodePort - # This tells AWS to send traffic from the ALB using HTTP2. 
Can use GRPC as well if you want to leverage GRPC specific features + # -- Backend protocol version for the AWS ALB gRPC service + ## This tells AWS to send traffic from the ALB using HTTP2. Can use gRPC as well if you want to leverage gRPC specific features backendProtocolVersion: HTTP2 + # -- List of ingress hosts for dedicated [gRPC-ingress] ## Argo Ingress. ## Hostnames must be provided if Ingress is enabled. ## Secrets must be manually created in the namespace ## - hosts: - [] + hosts: [] # - argocd.example.com + + # -- List of ingress paths for dedicated [gRPC-ingress] paths: - / + # -- Ingress path type for dedicated [gRPC-ingress]. One of `Exact`, `Prefix` or `ImplementationSpecific` pathType: Prefix - extraPaths: - [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - ## for Kubernetes >=1.19 (when "networking.k8s.io/v1" is used) + # -- Additional ingress paths for dedicated [gRPC-ingress] + extraPaths: [] # - path: /* # pathType: Prefix # backend: @@ -680,11 +1728,14 @@ server: # name: ssl-redirect # port: # name: use-annotation - tls: - [] - # - secretName: argocd-tls-certificate + + # -- Ingress TLS configuration for dedicated [gRPC-ingress] + tls: [] + # - secretName: your-certificate-name # hosts: # - argocd.example.com + + # -- Uses `server.service.servicePortHttps` instead `server.service.servicePortHttp` https: false # Create a OpenShift Route with SSL passthrough for UI and CLI @@ -692,146 +1743,21 @@ server: # Find your domain with: kubectl describe --namespace=openshift-ingress-operator ingresscontroller/default | grep Domain: # If 'hostname' is an empty string "" OpenShift will create a hostname for you. 
route: + # -- Enable an OpenShift Route for the Argo CD server enabled: false + # -- Openshift Route annotations + annotations: {} + # -- Hostname of OpenShift Route hostname: "" + # -- Termination type of Openshift Route + termination_type: passthrough + # -- Termination policy of Openshift Route + termination_policy: None - ## ArgoCD config - ## reference https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cm.yaml - configEnabled: true - config: - # Argo CD's externally facing base URL (optional). Required when configuring SSO - url: https://argocd.example.com - # Argo CD instance label key - application.instanceLabelKey: argocd.argoproj.io/instance - - # DEPRECATED: Please instead use configs.credentialTemplates and configs.repositories - # repositories: | - # - url: git@github.com:group/repo.git - # sshPrivateKeySecret: - # name: secret-name - # key: sshPrivateKey - # - type: helm - # url: https://charts.helm.sh/stable - # name: stable - # - type: helm - # url: https://argoproj.github.io/argo-helm - # name: argo - - # oidc.config: | - # name: AzureAD - # issuer: https://login.microsoftonline.com/TENANT_ID/v2.0 - # clientID: CLIENT_ID - # clientSecret: $oidc.azuread.clientSecret - # requestedIDTokenClaims: - # groups: - # essential: true - # requestedScopes: - # - openid - # - profile - # - email - - ## Annotations to be added to ArgoCD ConfigMap - configAnnotations: {} - - ## ArgoCD rbac config - ## reference https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/rbac.md - rbacConfig: - {} - # policy.csv is an file containing user-defined RBAC policies and role definitions (optional). - # Policy rules are in the form: - # p, subject, resource, action, object, effect - # Role definitions and bindings are in the form: - # g, subject, inherited-subject - # See https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/rbac.md for additional information. 
- # policy.csv: | - # # Grant all members of the group 'my-org:team-alpha; the ability to sync apps in 'my-project' - # p, my-org:team-alpha, applications, sync, my-project/*, allow - # # Grant all members of 'my-org:team-beta' admins - # g, my-org:team-beta, role:admin - # policy.default is the name of the default role which Argo CD will falls back to, when - # authorizing API requests (optional). If omitted or empty, users may be still be able to login, - # but will see no apps, projects, etc... - # policy.default: role:readonly - # scopes controls which OIDC scopes to examine during rbac enforcement (in addition to `sub` scope). - # If omitted, defaults to: '[groups]'. The scope value can be a string, or a list of strings. - # scopes: '[cognito:groups, email]' - - ## Annotations to be added to ArgoCD rbac ConfigMap - rbacConfigAnnotations: {} - - # Boolean determining whether or not to create the configmap. If false, it is expected the configmap will be created - # by something else. ArgoCD will not work if there is no configMap created with the name above. - rbacConfigCreate: true - - ## Not well tested and not well supported on release v1.0.0. 
- ## Applications - ## reference: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/ - additionalApplications: [] - # - name: guestbook - # namespace: argocd - # additionalLabels: {} - # additionalAnnotations: {} - # project: guestbook - # source: - # repoURL: https://github.com/argoproj/argocd-example-apps.git - # targetRevision: HEAD - # path: guestbook - # directory: - # recurse: true - # destination: - # server: https://kubernetes.default.svc - # namespace: guestbook - # syncPolicy: - # automated: - # prune: false - # selfHeal: false - - ## Projects - ## reference: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/ - additionalProjects: [] - # - name: guestbook - # namespace: argocd - # additionalLabels: {} - # additionalAnnotations: {} - # description: Example Project - # sourceRepos: - # - '*' - # destinations: - # - namespace: guestbook - # server: https://kubernetes.default.svc - # clusterResourceWhitelist: [] - # namespaceResourceBlacklist: - # - group: '' - # kind: ResourceQuota - # - group: '' - # kind: LimitRange - # - group: '' - # kind: NetworkPolicy - # orphanedResources: {} - # roles: [] - # namespaceResourceWhitelist: - # - group: 'apps' - # kind: Deployment - # - group: 'apps' - # kind: StatefulSet - # orphanedResources: {} - # roles: [] - # syncWindows: - # - kind: allow - # schedule: '10 1 * * *' - # duration: 1h - # applications: - # - '*-prod' - # manualSync: true - - ## Enable Admin ClusterRole resources. - ## Enable if you would like to grant rights to ArgoCD to deploy to the local Kubernetes cluster. 
- clusterAdminAccess: - enabled: true - - ## Enable BackendConfig custom resource for Google Kubernetes Engine GKEbackendConfig: + # -- Enable BackendConfig custom resource for Google Kubernetes Engine enabled: false + # -- [BackendConfigSpec] spec: {} # spec: # iap: @@ -839,163 +1765,320 @@ server: # oauthclientCredentials: # secretName: argocd-secret - extraContainers: [] - ## Additional containers to be added to the controller pod. - ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. - # - name: my-sidecar - # image: nginx:latest - # - name: lemonldap-ng-controller - # image: lemonldapng/lemonldap-ng-controller:0.2.0 - # args: - # - /lemonldap-ng-controller - # - --alsologtostderr - # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration - # env: - # - name: POD_NAME - # valueFrom: - # fieldRef: - # fieldPath: metadata.name - # - name: POD_NAMESPACE - # valueFrom: - # fieldRef: - # fieldPath: metadata.namespace - # volumeMounts: - # - name: copy-portal-skins - # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + ## Create a Google Managed Certificate for use with the GKE Ingress Controller + ## https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs + GKEmanagedCertificate: + # -- Enable ManagedCertificate custom resource for Google Kubernetes Engine. 
+ enabled: false
+ # -- Domains for the Google Managed Certificate
+ domains:
+ - argocd.example.com
+
+ ## Create a Google Managed Certificate for use with the GKE Ingress Controller
+ ## https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs
+ GKEmanagedCertificate:
+ # -- Enable ManagedCertificate custom resource for Google Kubernetes Engine.
+ enabled: false
+ # -- Domains for the Google Managed Certificate
+ domains:
+ - argocd.example.com
+
+ ## Create a Google FrontendConfig Custom Resource, for use with the GKE Ingress Controller
+ ## https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-features#configuring_ingress_features_through_frontendconfig_parameters
+ GKEfrontendConfig:
+ # -- Enable FrontendConfig custom resource for Google Kubernetes Engine
+ enabled: false
+ # -- [FrontendConfigSpec]
+ spec: {}
+ # spec:
+ # redirectToHttps:
+ # enabled: true
+ # responseCodeName: RESPONSE_CODE

## Repo Server
repoServer:
+ # -- Repo server name
name: repo-server
+ # -- The number of repo server pods to run
replicas: 1

+ ## Repo server Horizontal Pod Autoscaler
autoscaling:
+ # -- Enable Horizontal Pod Autoscaler ([HPA]) for the repo server
enabled: false
+ # -- Minimum number of replicas for the repo server [HPA]
minReplicas: 1
+ # -- Maximum number of replicas for the repo server [HPA]
maxReplicas: 5
+ # -- Average CPU utilization percentage for the repo server [HPA]
targetCPUUtilizationPercentage: 50
+ # -- Average memory utilization percentage for the repo server [HPA]
targetMemoryUtilizationPercentage: 50
-
+ # -- Configures the scaling behavior of the target in both Up and Down directions.
+ # This is only available on HPA apiVersion `autoscaling/v2beta2` and newer
+ behavior: {}
+ # scaleDown:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 1
+ # periodSeconds: 180
+ # scaleUp:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 2
+ # periodSeconds: 60
+
+ ## Repo server Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the repo server
+ enabled: false
+ # -- Labels to be added to repo server pdb
+ labels: {}
+ # -- Annotations to be added to repo server pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `repoServer.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Repo server image
image:
- repository: # defaults to global.image.repository
- tag: # defaults to global.image.tag
- imagePullPolicy: # IfNotPresent
+ # -- Repository to use for the repo server
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Tag to use for the repo server
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Image pull policy for the repo server
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []

- ## Additional command line arguments to pass to argocd-repo-server
- ##
+ # -- Additional command line arguments to pass to repo server
extraArgs: []

- ## Environment variables to pass to argocd-repo-server
- ##
+ # -- Environment variables to pass to repo server
env: []

- ## envFrom to pass to argocd-repo-server
- ##
+ # -- envFrom to pass to 
repo server + # @default -- `[]` (See [values.yaml]) envFrom: [] # - configMapRef: # name: config-map-name # - secretRef: # name: secret-name - ## Argo repoServer log format: text|json - logFormat: text - ## Argo repoServer log level - logLevel: info + # -- Additional containers to be added to the repo server pod + ## Ref: https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/ + ## Note: Supports use of custom Helm templates + extraContainers: [] + # - name: cmp + # # Entrypoint should be Argo CD lightweight CMP server i.e. argocd-cmp-server + # command: [/var/run/argocd/argocd-cmp-server] + # # This can be off-the-shelf or custom-built image + # image: busybox + # securityContext: + # runAsNonRoot: true + # runAsUser: 999 + # volumeMounts: + # - mountPath: /var/run/argocd + # name: var-files + # - mountPath: /home/argocd/cmp-server/plugins + # name: plugins + # # Remove this volumeMount if you've chosen to bake the config file into the sidecar image. + # - mountPath: /home/argocd/cmp-server/config/plugin.yaml + # subPath: plugin.yaml + # name: cmp-plugin + # # Starting with v2.4, do NOT mount the same tmp volume as the repo-server container. The filesystem separation helps + # # mitigate path traversal attacks. 
+ # - mountPath: /tmp + # name: cmp-tmp + + # -- Init containers to add to the repo server pods + initContainers: [] + + # -- Additional volumeMounts to the repo server main container + volumeMounts: [] - ## Annotations to be added to repo server pods - ## - podAnnotations: {} + # -- Additional volumes to the repo server pod + volumes: [] + # - name: cmp-plugin + # configMap: + # name: cmp-plugin + # - name: cmp-tmp + # emptyDir: {} + + # -- Annotations to be added to repo server Deployment + deploymentAnnotations: {} + + # -- Annotations to be added to repo server pods + podAnnotations: {} - ## Labels to be added to repo server pods - ## + # -- Labels to be added to repo server pods podLabels: {} - ## Configures the repo server port - containerPort: 8081 + # -- Resource limits and requests for the repo server pods + resources: {} + # limits: + # cpu: 50m + # memory: 128Mi + # requests: + # cpu: 10m + # memory: 64Mi + + # Repo server container ports + containerPorts: + # -- Repo server container port + server: 8081 + # -- Metrics container port + metrics: 8084 + + # -- Repo server container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL ## Readiness and liveness probes for default backend ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ - ## readinessProbe: + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed successThreshold: 1 + # -- Number of seconds after which the [probe] 
times out timeoutSeconds: 1 + livenessProbe: + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed successThreshold: 1 + # -- Number of seconds after which the [probe] times out timeoutSeconds: 1 - ## Additional volumeMounts to the repo server main container. - volumeMounts: [] - - ## Additional volumes to the repo server pod. - volumes: [] - - ## Node selectors and tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## + # -- [Node selector] nodeSelector: {} + # -- [Tolerations] for use with node taints tolerations: [] + # -- Assign custom [affinity] rules to the deployment affinity: {} - priorityClassName: "" + # -- Assign custom [TopologySpreadConstraints] rules to the repo server + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule - ## Labels to set container specific security contexts - containerSecurityContext: - {} - # capabilities: - # drop: - # - all - # readOnlyRootFilesystem: true + # -- Priority class for the repo server + priorityClassName: "" - resources: {} - # limits: - # cpu: 50m - # memory: 128Mi - # requests: - # cpu: 10m - # memory: 64Mi + # TLS certificate configuration via Secret + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#configuring-tls-to-argocd-repo-server + ## Note: Issuing certificates via cert-manager in not supported 
right now because it's not possible to restart repo server automatically without extra controllers. + certificateSecret: + # -- Create argocd-repo-server-tls secret + enabled: false + # -- Annotations to be added to argocd-repo-server-tls secret + annotations: {} + # -- Labels to be added to argocd-repo-server-tls secret + labels: {} + # -- Certificate authority. Required for self-signed certificates. + ca: '' + # -- Certificate private key + key: '' + # -- Certificate data. Must contain SANs of Repo service (ie: argocd-repo-server, argocd-repo-server.argo-cd.svc) + crt: '' ## Repo server service configuration service: + # -- Repo server service annotations annotations: {} + # -- Repo server service labels labels: {} + # -- Repo server service port port: 8081 + # -- Repo server service port name portName: https-repo-server ## Repo server metrics service configuration metrics: + # -- Deploy metrics service enabled: false service: + # -- Metrics service annotations annotations: {} + # -- Metrics service labels labels: {} + # -- Metrics service port servicePort: 8084 + # -- Metrics service port name + portName: http-metrics serviceMonitor: + # -- Enable a prometheus ServiceMonitor enabled: false + # -- Prometheus ServiceMonitor interval interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion metricRelabelings: [] - # selector: - # prometheus: kube-prometheus - # namespace: monitoring - # additionalLabels: {} + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # "monitoring" + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + + ## Enable Custom Rules for the Repo 
server's Cluster Role resource + ## Enable this and set the rules: to whatever custom rules you want for the Cluster Role resource. + ## Defaults to off + clusterRoleRules: + # -- Enable custom rules for the Repo server's Cluster Role resource + enabled: false + # -- List of custom rules for the Repo server's Cluster Role resource + rules: [] ## Repo server service account ## If create is set to true, make sure to uncomment the name and update the rbac section below serviceAccount: - create: false - # name: argocd-repo-server - ## Annotations applied to created service account + # -- Create repo server service account + create: true + # -- Repo server service account name + name: "" # "argocd-repo-server" + # -- Annotations applied to created service account annotations: {} - ## Automount API credentials for the Service Account + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account automountServiceAccountToken: true - ## Repo server rbac rules - # rbac: + # -- Repo server rbac rules + rbac: [] # - apiGroups: # - argoproj.io # resources: @@ -1005,212 +2088,856 @@ repoServer: # - list # - watch - ## Use init containers to configure custom tooling - ## https://argoproj.github.io/argo-cd/operator-manual/custom_tools/ - ## When using the volumes & volumeMounts section bellow, please comment out those above. 
- # volumes: - # - name: custom-tools - # emptyDir: {} - # - # initContainers: - # - name: download-tools - # image: alpine:3.8 - # command: [sh, -c] - # args: - # - wget -qO- https://get.helm.sh/helm-v2.16.1-linux-amd64.tar.gz | tar -xvzf - && - # mv linux-amd64/helm /custom-tools/ - # volumeMounts: - # - mountPath: /custom-tools - # name: custom-tools - # volumeMounts: - # - mountPath: /usr/local/bin/helm - # name: custom-tools - # subPath: helm +## ApplicationSet controller +applicationSet: + # -- Enable ApplicationSet controller + enabled: true -## Argo Configs -configs: - ## External Cluster Credentials - ## reference: - ## - https://argoproj.github.io/argo-cd/operator-manual/declarative-setup/#clusters - ## - https://argoproj.github.io/argo-cd/operator-manual/security/#external-cluster-credentials - clusterCredentials: [] - # - name: mycluster - # server: https://mycluster.com - # labels: {} - # annotations: {} - # config: - # bearerToken: "" - # tlsClientConfig: - # insecure: false - # caData: "" - # - name: mycluster2 - # server: https://mycluster2.com - # labels: {} - # annotations: {} - # namespaces: namespace1,namespace2 - # config: - # bearerToken: "" - # tlsClientConfig: - # insecure: false - # caData: "" + # -- ApplicationSet controller name string + name: applicationset-controller - gpgKeysAnnotations: {} - gpgKeys: {} - # 4AEE18F83AFDEB23: | - # -----BEGIN PGP PUBLIC KEY BLOCK----- - # - # mQENBFmUaEEBCACzXTDt6ZnyaVtueZASBzgnAmK13q9Urgch+sKYeIhdymjuMQta - # x15OklctmrZtqre5kwPUosG3/B2/ikuPYElcHgGPL4uL5Em6S5C/oozfkYzhwRrT - # SQzvYjsE4I34To4UdE9KA97wrQjGoz2Bx72WDLyWwctD3DKQtYeHXswXXtXwKfjQ - # 7Fy4+Bf5IPh76dA8NJ6UtjjLIDlKqdxLW4atHe6xWFaJ+XdLUtsAroZcXBeWDCPa - # buXCDscJcLJRKZVc62gOZXXtPfoHqvUPp3nuLA4YjH9bphbrMWMf810Wxz9JTd3v - # yWgGqNY0zbBqeZoGv+TuExlRHT8ASGFS9SVDABEBAAG0NUdpdEh1YiAod2ViLWZs - # b3cgY29tbWl0IHNpZ25pbmcpIDxub3JlcGx5QGdpdGh1Yi5jb20+iQEiBBMBCAAW - # BQJZlGhBCRBK7hj4Ov3rIwIbAwIZAQAAmQEH/iATWFmi2oxlBh3wAsySNCNV4IPf - # 
DDMeh6j80WT7cgoX7V7xqJOxrfrqPEthQ3hgHIm7b5MPQlUr2q+UPL22t/I+ESF6 - # 9b0QWLFSMJbMSk+BXkvSjH9q8jAO0986/pShPV5DU2sMxnx4LfLfHNhTzjXKokws - # +8ptJ8uhMNIDXfXuzkZHIxoXk3rNcjDN5c5X+sK8UBRH092BIJWCOfaQt7v7wig5 - # 4Ra28pM9GbHKXVNxmdLpCFyzvyMuCmINYYADsC848QQFFwnd4EQnupo6QvhEVx1O - # j7wDwvuH5dCrLuLwtwXaQh0onG4583p0LGms2Mf5F+Ick6o/4peOlBoZz48= - # =Bvzs - # -----END PGP PUBLIC KEY BLOCK----- - - knownHostsAnnotations: {} - knownHosts: - data: - ssh_known_hosts: | - bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw== - github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== - gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= - gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf - gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 - ssh.dev.azure.com ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H - vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H - tlsCertsAnnotations: {} - tlsCerts: - {} - # data: - # argocd.example.com: | - # -----BEGIN CERTIFICATE----- - # MIIF1zCCA7+gAwIBAgIUQdTcSHY2Sxd3Tq/v1eIEZPCNbOowDQYJKoZIhvcNAQEL - # BQAwezELMAkGA1UEBhMCREUxFTATBgNVBAgMDExvd2VyIFNheG9ueTEQMA4GA1UE - # BwwHSGFub3ZlcjEVMBMGA1UECgwMVGVzdGluZyBDb3JwMRIwEAYDVQQLDAlUZXN0 - # c3VpdGUxGDAWBgNVBAMMD2Jhci5leGFtcGxlLmNvbTAeFw0xOTA3MDgxMzU2MTda - # Fw0yMDA3MDcxMzU2MTdaMHsxCzAJBgNVBAYTAkRFMRUwEwYDVQQIDAxMb3dlciBT - # YXhvbnkxEDAOBgNVBAcMB0hhbm92ZXIxFTATBgNVBAoMDFRlc3RpbmcgQ29ycDES - # MBAGA1UECwwJVGVzdHN1aXRlMRgwFgYDVQQDDA9iYXIuZXhhbXBsZS5jb20wggIi - # MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCv4mHMdVUcafmaSHVpUM0zZWp5 - # NFXfboxA4inuOkE8kZlbGSe7wiG9WqLirdr39Ts+WSAFA6oANvbzlu3JrEQ2CHPc - # CNQm6diPREFwcDPFCe/eMawbwkQAPVSHPts0UoRxnpZox5pn69ghncBR+jtvx+/u - # P6HdwW0qqTvfJnfAF1hBJ4oIk2AXiip5kkIznsAh9W6WRy6nTVCeetmIepDOGe0G - # ZJIRn/OfSz7NzKylfDCat2z3EAutyeT/5oXZoWOmGg/8T7pn/pR588GoYYKRQnp+ - # YilqCPFX+az09EqqK/iHXnkdZ/Z2fCuU+9M/Zhrnlwlygl3RuVBI6xhm/ZsXtL2E - # Gxa61lNy6pyx5+hSxHEFEJshXLtioRd702VdLKxEOuYSXKeJDs1x9o6cJ75S6hko - # Ml1L4zCU+xEsMcvb1iQ2n7PZdacqhkFRUVVVmJ56th8aYyX7KNX6M9CD+kMpNm6J - # kKC1li/Iy+RI138bAvaFplajMF551kt44dSvIoJIbTr1LigudzWPqk31QaZXV/4u - # 
kD1n4p/XMc9HYU/was/CmQBFqmIZedTLTtK7clkuFN6wbwzdo1wmUNgnySQuMacO - # gxhHxxzRWxd24uLyk9Px+9U3BfVPaRLiOPaPoC58lyVOykjSgfpgbus7JS69fCq7 - # bEH4Jatp/10zkco+UQIDAQABo1MwUTAdBgNVHQ4EFgQUjXH6PHi92y4C4hQpey86 - # r6+x1ewwHwYDVR0jBBgwFoAUjXH6PHi92y4C4hQpey86r6+x1ewwDwYDVR0TAQH/ - # BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAFE4SdKsX9UsLy+Z0xuHSxhTd0jfn - # Iih5mtzb8CDNO5oTw4z0aMeAvpsUvjJ/XjgxnkiRACXh7K9hsG2r+ageRWGevyvx - # CaRXFbherV1kTnZw4Y9/pgZTYVWs9jlqFOppz5sStkfjsDQ5lmPJGDii/StENAz2 - # XmtiPOgfG9Upb0GAJBCuKnrU9bIcT4L20gd2F4Y14ccyjlf8UiUi192IX6yM9OjT - # +TuXwZgqnTOq6piVgr+FTSa24qSvaXb5z/mJDLlk23npecTouLg83TNSn3R6fYQr - # d/Y9eXuUJ8U7/qTh2Ulz071AO9KzPOmleYPTx4Xty4xAtWi1QE5NHW9/Ajlv5OtO - # OnMNWIs7ssDJBsB7VFC8hcwf79jz7kC0xmQqDfw51Xhhk04kla+v+HZcFW2AO9so - # 6ZdVHHQnIbJa7yQJKZ+hK49IOoBR6JgdB5kymoplLLiuqZSYTcwSBZ72FYTm3iAr - # jzvt1hxpxVDmXvRnkhRrIRhK4QgJL0jRmirBjDY+PYYd7bdRIjN7WNZLFsgplnS8 - # 9w6CwG32pRlm0c8kkiQ7FXA6BYCqOsDI8f1VGQv331OpR2Ck+FTv+L7DAmg6l37W - # +LB9LGh4OAp68ImTjqf6ioGKG0RBSznwME+r4nXtT1S/qLR6ASWUS4ViWRhbRlNK - # XWyb96wrUlv+E8I= - # -----END CERTIFICATE----- -## # Creates a secret with optional repository credentials -## DEPRECATED: Instead, use configs.credentialTemplates and/or configs.repositories - repositoryCredentials: {} + # -- The number of ApplicationSet controller pods to run + replicaCount: 1 -## Creates a secret for each key/value specified below to create repository credentials - credentialTemplates: {} - # github-enterprise-creds-1: - # url: https://github.com/argoproj - # githubAppID: 1 - # githubAppInstallationID: 2 - # githubAppEnterpriseBaseUrl: https://ghe.example.com/api/v3 - # githubAppPrivateKey: | - # -----BEGIN OPENSSH PRIVATE KEY----- - # ... - # -----END OPENSSH PRIVATE KEY----- - # https-creds: - # url: https://github.com/argoproj - # password: my-password - # username: my-username - # ssh-creds: - # url: git@github.com:argoproj-labs - # sshPrivateKey: | - # -----BEGIN OPENSSH PRIVATE KEY----- - # ... 
- # -----END OPENSSH PRIVATE KEY-----

+ ## ApplicationSet controller Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the ApplicationSet controller
+ enabled: false
+ # -- Labels to be added to ApplicationSet controller pdb
+ labels: {}
+ # -- Annotations to be added to ApplicationSet controller pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `applicationSet.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## ApplicationSet controller image
+ image:
+ # -- Repository to use for the ApplicationSet controller
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Tag to use for the ApplicationSet controller
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Image pull policy for the ApplicationSet controller
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- If defined, uses a Secret to pull an image from a private Docker registry or repository.
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []

-## Creates a secret for each key/value specified below to create repositories
-## Note: the last example in the list would use a repository credential template, configured under "configs.repositoryCredentials".
- repositories: {} - # istio-helm-repo: - # url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts - # name: istio.io - # type: helm - # private-helm-repo: - # url: https://my-private-chart-repo.internal - # name: private-repo - # type: helm - # password: my-password - # username: my-username - # private-repo: - # url: https://github.com/argoproj/private-repo + # -- ApplicationSet controller log format. Either `text` or `json` + # @default -- `""` (defaults to global.logging.format) + logFormat: "" + # -- ApplicationSet controller log level. One of: `debug`, `info`, `warn`, `error` + # @default -- `""` (defaults to global.logging.level) + logLevel: "" + + args: + # -- How application is synced between the generator and the cluster + policy: sync + # -- Enable dry run mode + dryRun: false + + # -- List of extra cli args to add + extraArgs: [] + + # -- Environment variables to pass to the ApplicationSet controller + extraEnv: [] + # - name: "MY_VAR" + # value: "value" + + # -- envFrom to pass to the ApplicationSet controller + # @default -- `[]` (See [values.yaml]) + extraEnvFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Additional containers to be added to the ApplicationSet controller pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + + # -- Init containers to add to the ApplicationSet controller pod + ## Note: Supports use of custom Helm templates + initContainers: [] + + # -- List of extra mounts to add (normally used with extraVolumes) + extraVolumeMounts: [] + + # -- List of extra volumes to add + extraVolumes: [] + + ## Metrics service configuration + metrics: + # -- Deploy metrics service + enabled: false + service: + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port + servicePort: 8085 + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- 
Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor interval + interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # monitoring + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + + ## ApplicationSet service configuration + service: + # -- ApplicationSet service annotations + annotations: {} + # -- ApplicationSet service labels + labels: {} + # -- ApplicationSet service port + port: 7000 + # -- ApplicationSet service port name + portName: webhook + + serviceAccount: + # -- Create ApplicationSet controller service account + create: true + # -- ApplicationSet controller service account name + name: argocd-applicationset-controller + # -- Annotations applied to created service account + annotations: {} + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: true + + # -- Annotations to be added to ApplicationSet controller Deployment + deploymentAnnotations: {} + + # -- Annotations for the ApplicationSet controller pods + podAnnotations: {} + + # -- Labels for the ApplicationSet controller pods + podLabels: {} + + # -- Resource limits and requests for the ApplicationSet controller pods. 
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # ApplicationSet controller container ports
+ containerPorts:
+ # -- Metrics container port
+ metrics: 8080
+ # -- Probe container port
+ probe: 8081
+ # -- Webhook container port
+ webhook: 7000
+
+ # -- ApplicationSet controller container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ ## Probes for ApplicationSet controller (optional)
+ ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+ readinessProbe:
+ # -- Enable Kubernetes readiness probe for ApplicationSet controller
+ enabled: false
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+
+ livenessProbe:
+ # -- Enable Kubernetes liveness probe for ApplicationSet controller
+ enabled: false
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+
+ # -- [Node 
selector] + nodeSelector: {} + + # -- [Tolerations] for use with node taints + tolerations: [] + + # -- Assign custom [affinity] rules + affinity: {} + + # -- If specified, indicates the pod's priority. If not specified, the pod priority will be default or zero if there is no default. + priorityClassName: "" + + ## Webhook for the Git Generator + ## Ref: https://argocd-applicationset.readthedocs.io/en/master/Generators-Git/#webhook-configuration) + webhook: + ingress: + # -- Enable an ingress resource for Webhooks + enabled: false + # -- Additional ingress annotations + annotations: {} + # -- Additional ingress labels + labels: {} + # -- Defines which ingress ApplicationSet controller will implement the resource + ingressClassName: "" + + # -- List of ingress hosts + ## Hostnames must be provided if Ingress is enabled. + ## Secrets must be manually created in the namespace + hosts: [] + # - argocd-applicationset.example.com + + # -- List of ingress paths + paths: + - /api/webhook + # -- Ingress path type. 
One of `Exact`, `Prefix` or `ImplementationSpecific` + pathType: Prefix + # -- Additional ingress paths + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## for Kubernetes >=1.19 (when "networking.k8s.io/v1" is used) + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + # -- Ingress TLS configuration + tls: [] + # - secretName: argocd-applicationset-tls + # hosts: + # - argocd-applicationset.example.com + +## Notifications controller +notifications: + # -- Enable notifications controller + enabled: true + + # -- Notifications controller name string + name: notifications-controller + + # -- Argo CD dashboard url; used in place of {{.context.argocdUrl}} in templates + argocdUrl: + + ## Notifications controller Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the notifications controller + enabled: false + # -- Labels to be added to notifications controller pdb + labels: {} + # -- Annotations to be added to notifications controller pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailble after eviction as number or percentage (eg.: 50%). 
+ ## Has higher precedence over `notifications.pdb.minAvailable` + maxUnavailable: "" + + ## Notifications controller image + image: + # -- Repository to use for the notifications controller + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the notifications controller + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the notifications controller + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- Notifications controller log format. Either `text` or `json` + # @default -- `""` (defaults to global.logging.format) + logFormat: "" + # -- Notifications controller log level. One of: `debug`, `info`, `warn`, `error` + # @default -- `""` (defaults to global.logging.level) + logLevel: "" + + # -- Extra arguments to provide to the notifications controller + extraArgs: [] + + # -- Additional container environment variables + extraEnv: [] + + # -- envFrom to pass to the notifications controller + # @default -- `[]` (See [values.yaml]) + extraEnvFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Additional containers to be added to the notifications controller pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + + # -- Init containers to add to the notifications controller pod + ## Note: Supports use of custom Helm templates + initContainers: [] + + # -- List of extra mounts to add (normally used with extraVolumes) + extraVolumeMounts: [] + + # -- List of extra volumes to add + extraVolumes: [] + + # -- Define user-defined context + ## For more information: https://argocd-notifications.readthedocs.io/en/stable/templates/#defining-user-defined-context + context: {} + # region: east + # environmentName: staging secret: - 
createSecret: true - ## Annotations to be added to argocd-secret - ## + # -- Whether helm chart creates notifications controller secret + create: true + + # -- key:value pairs of annotations to be added to the secret annotations: {} - # Webhook Configs - githubSecret: "" - gitlabSecret: "" - bitbucketServerSecret: "" - bitbucketUUID: "" - gogsSecret: "" + # -- Generic key:value pairs to be inserted into the secret + ## Can be used for templates, notification services etc. Some examples given below. + ## For more information: https://argocd-notifications.readthedocs.io/en/stable/services/overview/ + items: {} + # slack-token: + # # For more information: https://argocd-notifications.readthedocs.io/en/stable/services/slack/ - # Custom secrets. Useful for injecting SSO secrets into environment variables. - # Ref: https://argoproj.github.io/argo-cd/operator-manual/sso/ - # Note that all values must be non-empty. - extra: - {} - # LDAP_PASSWORD: "mypassword" + # grafana-apiKey: + # # For more information: https://argocd-notifications.readthedocs.io/en/stable/services/grafana/ - # Argo TLS Data. - argocdServerTlsConfig: - {} - # key: - # crt: | - # -----BEGIN CERTIFICATE----- - # - # -----END CERTIFICATE----- - # -----BEGIN CERTIFICATE----- - # - # -----END CERTIFICATE----- + # webhooks-github-token: - # Argo expects the password in the secret to be bcrypt hashed. 
You can create this hash with - # `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'` - # argocdServerAdminPassword: "" - # Password modification time defaults to current time if not set - # argocdServerAdminPasswordMtime: "2006-01-02T15:04:05Z" + # email-username: + # email-password: + # For more information: https://argocd-notifications.readthedocs.io/en/stable/services/email/ - ## Custom CSS Styles - ## Reference: https://argo-cd.readthedocs.io/en/stable/operator-manual/custom-styles/ - # styles: | - # .nav-bar { - # background: linear-gradient(to bottom, #999, #777, #333, #222, #111); - # } + metrics: + # -- Enables prometheus metrics server + enabled: false + # -- Metrics port + port: 9001 + service: + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] -openshift: - enabled: false + # -- Configures notification services such as slack, email or custom webhook + # @default -- See [values.yaml] + ## For more information: https://argocd-notifications.readthedocs.io/en/stable/services/overview/ + notifiers: {} + # service.slack: | + # token: $slack-token + + # -- Annotations to be applied to the notifications controller Deployment + deploymentAnnotations: {} + + # -- Annotations to be applied to the 
notifications controller Pods + podAnnotations: {} + + # -- Labels to be applied to the notifications controller Pods + podLabels: {} + + # -- Resource limits and requests for the notifications controller + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # Notification controller container ports + containerPorts: + # -- Metrics container port + metrics: 9001 + + # -- Notification controller container-level security Context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # -- [Node selector] + nodeSelector: {} + + # -- [Tolerations] for use with node taints + tolerations: [] + + # -- Assign custom [affinity] rules + affinity: {} + + # -- Priority class for the notifications controller pods + priorityClassName: "" + + serviceAccount: + # -- Create notifications controller service account + create: true + # -- Notification controller service account name + name: argocd-notifications-controller + # -- Annotations applied to created service account + annotations: {} + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: true + + cm: + # -- Whether helm chart creates notifications controller config map + create: true + + # -- Contains centrally managed global application subscriptions + ## For more information: https://argocd-notifications.readthedocs.io/en/stable/subscriptions/ + subscriptions: [] + # # subscription for on-sync-status-unknown trigger notifications + # - recipients: + # - slack:test2 + # - email:test@gmail.com + # triggers: + # - on-sync-status-unknown + # # subscription restricted to applications with matching labels only + # - recipients: + # - slack:test3 + # selector: test=true + # triggers: + # - on-sync-status-unknown + + 
# -- The notification template is used to generate the notification content + ## For more information: https://argocd-notifications.readthedocs.io/en/stable/templates/ + templates: {} + # template.app-deployed: | + # email: + # subject: New version of an application {{.app.metadata.name}} is up and running. + # message: | + # {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} is now running new version of deployments manifests. + # slack: + # attachments: | + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#18be52", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # }, + # { + # "title": "Revision", + # "value": "{{.app.status.sync.revision}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-health-degraded: | + # email: + # subject: Application {{.app.metadata.name}} has degraded. + # message: | + # {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} has degraded. + # Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. 
+ # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#f4c030", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-failed: | + # email: + # subject: Failed to sync application {{.app.metadata.name}}. + # message: | + # {{if eq .serviceType "slack"}}:exclamation:{{end}} The sync operation of application {{.app.metadata.name}} has failed at {{.app.status.operationState.finishedAt}} with the following error: {{.app.status.operationState.message}} + # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#E96D76", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-running: | + # email: + # subject: Start syncing application {{.app.metadata.name}}. + # message: | + # The sync operation of application {{.app.metadata.name}} has started at {{.app.status.operationState.startedAt}}. 
+ # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#0DADEA", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-status-unknown: | + # email: + # subject: Application {{.app.metadata.name}} sync status is 'Unknown' + # message: | + # {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} sync is 'Unknown'. + # Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. + # {{if ne .serviceType "slack"}} + # {{range $c := .app.status.conditions}} + # * {{$c.message}} + # {{end}} + # {{end}} + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#E96D76", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-succeeded: | + # email: + # subject: Application {{.app.metadata.name}} has been successfully synced. 
+ # message: | + # {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}. + # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#18be52", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + + # -- The trigger defines the condition when the notification should be sent + ## For more information: https://argocd-notifications.readthedocs.io/en/stable/triggers/ + triggers: {} + # trigger.on-deployed: | + # - description: Application is synced and healthy. Triggered once per commit. 
+ # oncePer: app.status.sync.revision + # send: + # - app-deployed + # when: app.status.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy' + # trigger.on-health-degraded: | + # - description: Application has degraded + # send: + # - app-health-degraded + # when: app.status.health.status == 'Degraded' + # trigger.on-sync-failed: | + # - description: Application syncing has failed + # send: + # - app-sync-failed + # when: app.status.operationState.phase in ['Error', 'Failed'] + # trigger.on-sync-running: | + # - description: Application is being synced + # send: + # - app-sync-running + # when: app.status.operationState.phase in ['Running'] + # trigger.on-sync-status-unknown: | + # - description: Application status is 'Unknown' + # send: + # - app-sync-status-unknown + # when: app.status.sync.status == 'Unknown' + # trigger.on-sync-succeeded: | + # - description: Application syncing has succeeded + # send: + # - app-sync-succeeded + # when: app.status.operationState.phase in ['Succeeded'] + # + # For more information: https://argocd-notifications.readthedocs.io/en/stable/triggers/#default-triggers + # defaultTriggers: | + # - on-sync-status-unknown + + ## The optional bot component simplifies managing subscriptions + ## For more information: https://argocd-notifications.readthedocs.io/en/stable/bots/overview/ + bots: + slack: + # -- Enable slack bot + ## You have to set secret.notifiers.slack.signingSecret + enabled: false + + ## Slack bot Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the Slack bot + enabled: false + # -- Labels to be added to Slack bot pdb + labels: {} + # -- Annotations to be added to Slack bot pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailble 
after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `notifications.bots.slack.pdb.minAvailable` + maxUnavailable: "" + + ## Slack bot image + image: + # -- Repository to use for the Slack bot + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the Slack bot + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the Slack bot + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- Slack bot log format. Either `text` or `json` + # @default -- `""` (defaults to global.logging.format) + logFormat: "" + # -- Slack bot log level. One of: `debug`, `info`, `warn`, `error` + # @default -- `""` (defaults to global.logging.level) + logLevel: "" + + # -- List of extra cli args to add for Slack bot + extraArgs: [] + + service: + # -- Service annotations for Slack bot + annotations: {} + # -- Service port for Slack bot + port: 80 + # -- Service type for Slack bot + type: LoadBalancer + + serviceAccount: + # -- Specifies whether a service account should be created + create: true + + # -- The name of the service account to use. 
+ ## If not set and create is true, a name is generated using the fullname template + name: argocd-notifications-bot + + # -- Annotations applied to created service account + annotations: {} + + # -- Slack bot container-level security Context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # -- Resource limits and requests for the Slack bot + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # -- Assign custom [affinity] rules + affinity: {} + + # -- [Tolerations] for use with node taints + tolerations: [] + + # -- [Node selector] + nodeSelector: {} diff --git a/terraform/helm/nginx_values.yaml b/terraform/helm/nginx_values.yaml index 063160af..aba30ebb 100644 --- a/terraform/helm/nginx_values.yaml +++ b/terraform/helm/nginx_values.yaml @@ -1,5 +1,7 @@ +# default values: https://github.com/kubernetes/ingress-nginx/blob/helm-chart-4.3.0/charts/ingress-nginx/values.yaml + ## nginx configuration -## Ref: https://github.com/kubernetes/ingress/blob/main/controllers/nginx/configuration.md +## Ref: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/ ## controller: @@ -19,8 +21,8 @@ controller: ## node or nodes where an ingress controller pod is running. publishService: # ! This is required for external-dns to work properly - # ! https://github.com/kubernetes-sigs/external-dns/blob/main/docs/tutorials/azure.md#deploy-externaldns - # ! https://github.com/kubernetes-sigs/external-dns/blob/main/docs/faq.md#why-is-externaldns-only-adding-a-single-ip-address-in-route-53-on-aws-when-using-the-nginx-ingress-controller-how-do-i-get-it-to-use-the-fqdn-of-the-elb-assigned-to-my-nginx-ingress-controller-service-instead + # ! https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/azure.md + # ! 
https://github.com/kubernetes-sigs/external-dns/blob/master/docs/faq.md#why-is-externaldns-only-adding-a-single-ip-address-in-route-53-on-aws-when-using-the-nginx-ingress-controller-how-do-i-get-it-to-use-the-fqdn-of-the-elb-assigned-to-my-nginx-ingress-controller-service-instead enabled: true ## Allows overriding of the publish service to bind to ## Must be / diff --git a/terraform/helm/velero_default_values.yaml b/terraform/helm/velero_default_values.yaml index 8bd33d56..08033459 100644 --- a/terraform/helm/velero_default_values.yaml +++ b/terraform/helm/velero_default_values.yaml @@ -1,14 +1,14 @@ -# source: https://github.com/vmware-tanzu/helm-charts/blob/velero-2.26.1/charts/velero/values.yaml +# source: https://github.com/vmware-tanzu/helm-charts/blob/velero-3.1.0/charts/velero/values.yaml ## ## Configuration settings that directly affect the Velero deployment YAML. ## # Details of the container image to use in the Velero deployment & daemonset (if -# enabling restic). Required. +# enabling node-agent). Required. image: repository: velero/velero - tag: v1.7.0 + tag: v1.10.0 # Digest value example: sha256:d238835e151cec91c6a811fe3a89a66d3231d9f64d09e5f3c49552672d271f38. # If used, it will take precedence over the image.tag. # digest: @@ -52,9 +52,16 @@ resources: dnsPolicy: ClusterFirst # Init containers to add to the Velero deployment's pod spec. At least one plugin provider image is required. -initContainers: [] +# If the value is a string then it is evaluated as a template. 
+initContainers: + # - name: velero-plugin-for-csi + # image: velero/velero-plugin-for-csi:v0.3.2 + # imagePullPolicy: IfNotPresent + # volumeMounts: + # - mountPath: /target + # name: plugins # - name: velero-plugin-for-aws - # image: velero/velero-plugin-for-aws:v1.3.0 + # image: velero/velero-plugin-for-aws:v1.5.2 # imagePullPolicy: IfNotPresent # volumeMounts: # - mountPath: /target @@ -75,9 +82,15 @@ containerSecurityContext: {} # add: [] # readOnlyRootFilesystem: true +# Container Lifecycle Hooks to use for the Velero deployment. Optional. +lifecycle: {} + # Pod priority class name to use for the Velero deployment. Optional. priorityClassName: "" +# The number of seconds to allow for graceful termination of the pod. Optional. +terminationGracePeriodSeconds: 3600 + # Tolerations to use for the Velero deployment. Optional. tolerations: [] @@ -87,12 +100,41 @@ affinity: {} # Node selector to use for the Velero deployment. Optional. nodeSelector: {} +# DNS configuration to use for the Velero deployment. Optional. +dnsConfig: {} + # Extra volumes for the Velero deployment. Optional. extraVolumes: [] # Extra volumeMounts for the Velero deployment. Optional. extraVolumeMounts: [] +# Extra K8s manifests to deploy +extraObjects: [] + # - apiVersion: secrets-store.csi.x-k8s.io/v1 + # kind: SecretProviderClass + # metadata: + # name: velero-secrets-store + # spec: + # provider: aws + # parameters: + # objects: | + # - objectName: "velero" + # objectType: "secretsmanager" + # jmesPath: + # - path: "access_key" + # objectAlias: "access_key" + # - path: "secret_key" + # objectAlias: "secret_key" + # secretObjects: + # - data: + # - key: access_key + # objectName: client-id + # - key: client-secret + # objectName: client-secret + # secretName: velero-secrets-store + # type: Opaque + # Settings for Velero's prometheus metrics. Enabled by default. 
metrics: enabled: true @@ -112,9 +154,39 @@ metrics: serviceMonitor: enabled: false + annotations: {} additionalLabels: {} # ServiceMonitor namespace. Default to Velero namespace. # namespace: + # ServiceMonitor connection scheme. Defaults to HTTP. + # scheme: "" + # ServiceMonitor connection tlsConfig. Defaults to {}. + # tlsConfig: {} + + prometheusRule: + enabled: false + # Additional labels to add to deployed PrometheusRule + additionalLabels: {} + # PrometheusRule namespace. Defaults to Velero namespace. + # namespace: "" + # Rules to be deployed + spec: [] + # - alert: VeleroBackupPartialFailures + # annotations: + # message: Velero backup {{ $labels.schedule }} has {{ $value | humanizePercentage }} partialy failed backups. + # expr: |- + # velero_backup_partial_failure_total{schedule!=""} / velero_backup_attempt_total{schedule!=""} > 0.25 + # for: 15m + # labels: + # severity: warning + # - alert: VeleroBackupFailures + # annotations: + # message: Velero backup {{ $labels.schedule }} has {{ $value | humanizePercentage }} failed backups. + # expr: |- + # velero_backup_failure_total{schedule!=""} / velero_backup_attempt_total{schedule!=""} > 0.25 + # for: 15m + # labels: + # severity: warning kubectl: image: @@ -124,6 +196,9 @@ kubectl: # digest: # kubectl image tag. If used, it will take precedence over the cluster Kubernetes version. # tag: 1.16.15 + # Container Level Security Context for the 'kubectl' container of the crd jobs. Optional. + # See: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + containerSecurityContext: {} # Resource requests/limits to specify for the upgrade/cleanup job. Optional resources: {} # Annotations to set for the upgrade/cleanup job. Optional. @@ -168,6 +243,9 @@ configuration: prefix: # default indicates this location is the default backup storage location. Optional. default: + # accessMode determines if velero can write to this backup storage location. 
Optional. + # default to ReadWrite, ReadOnly is used during migrations and restores. + accessMode: ReadWrite # Additional provider-specific configuration. See link above # for details of required/optional fields for your provider. config: {} @@ -209,36 +287,60 @@ configuration: # here if using a non-default value. The `velero server` default values are shown in the # comments below. # -------------------- + # `velero server` default: restic + uploaderType: # `velero server` default: 1m backupSyncPeriod: - # `velero server` default: 1h - resticTimeout: - # `velero server` default: namespaces,persistentvolumes,persistentvolumeclaims,secrets,configmaps,serviceaccounts,limitranges,pods - restoreResourcePriorities: - # `velero server` default: false - restoreOnlyMode: - # `velero server` default: 20.0 - clientQPS: + # `velero server` default: 4h + fsBackupTimeout: # `velero server` default: 30 clientBurst: + # `velero server` default: 500 + clientPageSize: + # `velero server` default: 20.0 + clientQPS: + # Name of the default backup storage location. Default: default + defaultBackupStorageLocation: + # How long to wait by default before backups can be garbage collected. Default: 72h + defaultBackupTTL: + # Name of the default volume snapshot location. + defaultVolumeSnapshotLocations: # `velero server` default: empty disableControllers: - # - - # additional key/value pairs to be used as environment variables such as "AWS_CLUSTER_NAME: 'yourcluster.domain.tld'" - extraEnvVars: {} - + # `velero server` default: 1h + garbageCollectionFrequency: + # Set log-format for Velero pod. Default: text. Other option: json. + logFormat: + # Set log-level for Velero pod. Default: info. Other options: debug, warning, error, fatal, panic. + logLevel: + # The address to expose prometheus metrics. Default: :8085 + metricsAddress: + # Directory containing Velero plugins. Default: /plugins + pluginDir: + # The address to expose the pprof profiler. 
Default: localhost:6060 + profilerAddress: + # `velero server` default: false + restoreOnlyMode: + # `velero server` default: customresourcedefinitions,namespaces,storageclasses,volumesnapshotclass.snapshot.storage.k8s.io,volumesnapshotcontents.snapshot.storage.k8s.io,volumesnapshots.snapshot.storage.k8s.io,persistentvolumes,persistentvolumeclaims,secrets,configmaps,serviceaccounts,limitranges,pods,replicasets.apps,clusterclasses.cluster.x-k8s.io,clusters.cluster.x-k8s.io,clusterresourcesets.addons.cluster.x-k8s.io + restoreResourcePriorities: + # `velero server` default: 1m + storeValidationFrequency: + # How long to wait on persistent volumes and namespaces to terminate during a restore before timing out. Default: 10m + terminatingResourceTimeout: # Comma separated list of velero feature flags. default: empty + # features: EnableCSI features: + # `velero server` default: velero + namespace: - # Set log-level for Velero pod. Default: info. Other options: debug, warning, error, fatal, panic. - logLevel: + # additional key/value pairs to be used as environment variables such as "AWS_CLUSTER_NAME: 'yourcluster.domain.tld'" + extraEnvVars: {} - # Set log-format for Velero pod. Default: text. Other option: json. - logFormat: + # Set true for backup all pod volumes without having to apply annotation on the pod when used file system backup Default: false. + defaultVolumesToFsBackup: - # Set true for backup all pod volumes without having to apply annotation on the pod when used restic Default: false. Other option: false. - defaultVolumesToRestic: + # How often repository maintain is run for repositories by default. + defaultRepoMaintainFrequency: ## ## End of backup/snapshot location settings. @@ -254,6 +356,8 @@ rbac: create: true # Whether to create the cluster role binding to give administrator permissions to Velero clusterAdministrator: true + # Name of the ClusterRole. 
+ clusterAdministratorName: cluster-admin # Information about the Kubernetes service account Velero uses. serviceAccount: @@ -267,9 +371,9 @@ serviceAccount: # should contain credentials for the cloud provider IAM account you've # set up for Velero. credentials: - # Whether a secret should be used as the source of IAM account - # credentials. Set to false if, for example, using kube2iam or - # kiam to provide IAM credentials for the Velero pod. + # Whether a secret should be used. Set to false if, for examples: + # - using kube2iam or kiam to provide AWS IAM credentials instead of providing the key file. (AWS only) + # - using workload identity instead of providing the key file. (GCP only) useSecret: true # Name of the secret to create if `useSecret` is true and `existingSecret` is empty name: @@ -292,7 +396,7 @@ credentials: # additional key/value pairs to be used as environment variables such as "DIGITALOCEAN_TOKEN: ". Values will be stored in the secret. extraEnvVars: {} # Name of a pre-existing secret (if any) in the Velero namespace - # that will be used to load environment variables into velero and restic. + # that will be used to load environment variables into velero and node-agent. # Secret should be in format - https://kubernetes.io/docs/concepts/configuration/secret/#use-case-as-container-environment-variables extraSecretRef: "" @@ -301,15 +405,15 @@ backupsEnabled: true # Whether to create volumesnapshotlocation crd, if false => disable snapshot feature snapshotsEnabled: true -# Whether to deploy the restic daemonset. -deployRestic: false +# Whether to deploy the node-agent daemonset. +deployNodeAgent: false -restic: +nodeAgent: podVolumePath: /var/lib/kubelet/pods privileged: false - # Pod priority class name to use for the Restic daemonset. Optional. + # Pod priority class name to use for the node-agent daemonset. Optional. priorityClassName: "" - # Resource requests/limits to specify for the Restic daemonset deployment. Optional. 
+ # Resource requests/limits to specify for the node-agent daemonset deployment. Optional. # https://velero.io/docs/v1.6/customize-installation/#customize-resource-requests-and-limits resources: requests: @@ -319,13 +423,13 @@ restic: cpu: 1000m memory: 1024Mi - # Tolerations to use for the Restic daemonset. Optional. + # Tolerations to use for the node-agent daemonset. Optional. tolerations: [] - # Annotations to set for the Restic daemonset. Optional. + # Annotations to set for the node-agent daemonset. Optional. annotations: {} - # labels to set for the Restic daemonset. Optional. + # labels to set for the node-agent daemonset. Optional. labels: {} # will map /scratch to emptyDir. Set to false and specify your own volume @@ -333,13 +437,16 @@ restic: # if you don't want to use emptyDir. useScratchEmptyDir: true - # Extra volumes for the Restic daemonset. Optional. + # Extra volumes for the node-agent daemonset. Optional. extraVolumes: [] - # Extra volumeMounts for the Restic daemonset. Optional. + # Extra volumeMounts for the node-agent daemonset. Optional. extraVolumeMounts: [] - # Configure the dnsPolicy of the Restic daemonset + # Key/value pairs to be used as environment variables for the node-agent daemonset. Optional. + extraEnvVars: {} + + # Configure the dnsPolicy of the node-agent daemonset # See: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy dnsPolicy: ClusterFirst @@ -350,13 +457,22 @@ restic: runAsUser: 0 # fsGroup: 1337 - # Container Level Security Context for the 'restic' container of the restic DaemonSet. Optional. + # Container Level Security Context for the 'node-agent' container of the node-agent daemonset. Optional. # See: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container containerSecurityContext: {} - # Node selector to use for the Restic daemonset. Optional. + # Container Lifecycle Hooks to use for the node-agent daemonset. Optional. 
+ lifecycle: {} + + # Node selector to use for the node-agent daemonset. Optional. nodeSelector: {} + # Affinity to use with node-agent daemonset. Optional. + affinity: {} + + # DNS configuration to use for the node-agent daemonset. Optional. + dnsConfig: {} + # Backup schedules to create. # Eg: # schedules: @@ -367,7 +483,7 @@ restic: # annotations: # myenv: foo # schedule: "0 0 * * *" -# useOwnerReferencesInBackup: true +# useOwnerReferencesInBackup: false # template: # ttl: "240h" # includedNamespaces: @@ -377,12 +493,12 @@ schedules: {} # Velero ConfigMaps. # Eg: # configMaps: -# restic-restore-action-config: +# fs-restore-action-config: # labels: # velero.io/plugin-config: "" -# velero.io/restic: RestoreItemAction +# velero.io/pod-volume-restore: RestoreItemAction # data: -# image: velero/velero-restic-restore-helper:v1.7.0 +# image: velero/velero-restore-helper:v1.10.0 configMaps: {} ## diff --git a/terraform/helm/velero_values.yaml b/terraform/helm/velero_values.yaml index 6c59cc21..c8eddb87 100644 --- a/terraform/helm/velero_values.yaml +++ b/terraform/helm/velero_values.yaml @@ -1,23 +1,21 @@ # velero helm values -# source (with full comments): https://github.com/vmware-tanzu/helm-charts/blob/velero-2.17.0/charts/velero/values.yaml +# source (with full comments): https://github.com/vmware-tanzu/helm-charts/blob/main/charts/velero/values.yaml # https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure/releases initContainers: - name: velero-plugin-for-microsoft-azure # https://hub.docker.com/r/velero/velero-plugin-for-microsoft-azure/tags - image: velero/velero-plugin-for-microsoft-azure:v1.2.1 + image: velero/velero-plugin-for-microsoft-azure:v1.6.0 imagePullPolicy: IfNotPresent volumeMounts: - mountPath: /target name: plugins - # BackupStorageLocation and VolumeSnapshotLocation configuration: provider: azure backupStorageLocation: name: default - provider: bucket: velero volumeSnapshotLocation: name: default diff --git a/terraform/kured_helm.tf 
b/terraform/kured_helm.tf index f8ebfbba..7073d157 100644 --- a/terraform/kured_helm.tf +++ b/terraform/kured_helm.tf @@ -11,7 +11,7 @@ resource "kubernetes_namespace" "kured" { delete = "15m" } - depends_on = [module.aks] + depends_on = [azurerm_kubernetes_cluster.aks] } # https://www.terraform.io/docs/providers/helm/r/release.html @@ -19,7 +19,7 @@ resource "helm_release" "kured" { chart = "kured" name = "kured" namespace = kubernetes_namespace.kured.metadata[0].name - repository = "https://weaveworks.github.io/kured" + repository = "https://kubereboot.github.io/charts/" version = var.kured_chart_version timeout = 600 atomic = true diff --git a/terraform/nexus_helm.tf b/terraform/nexus_helm.tf index eaeba3e1..e3e3cc3c 100644 --- a/terraform/nexus_helm.tf +++ b/terraform/nexus_helm.tf @@ -9,7 +9,7 @@ resource "kubernetes_namespace" "nexus" { delete = "15m" } - depends_on = [module.aks] + depends_on = [azurerm_kubernetes_cluster.aks] } # https://www.terraform.io/docs/provisioners/local-exec.html @@ -30,7 +30,7 @@ resource "null_resource" "nexus_cert_sync" { } depends_on = [ - local_file.kubeconfig, + local_sensitive_file.kubeconfig, helm_release.akv2k8s, kubernetes_namespace.nexus ] diff --git a/terraform/nginx_helm.tf b/terraform/nginx_helm.tf index 391464e2..8267892e 100644 --- a/terraform/nginx_helm.tf +++ b/terraform/nginx_helm.tf @@ -9,7 +9,7 @@ resource "kubernetes_namespace" "ingress" { delete = "15m" } - depends_on = [module.aks] + depends_on = [azurerm_kubernetes_cluster.aks] } # https://www.terraform.io/docs/providers/helm/r/release.html diff --git a/terraform/outputs.tf b/terraform/outputs.tf index dd91373d..8944140d 100644 --- a/terraform/outputs.tf +++ b/terraform/outputs.tf @@ -1,14 +1,14 @@ # WARNING: this outputs credential / login config # output "aks_config" { -# value = module.aks +# value = azurerm_kubernetes_cluster.aks # } output "aks_credentials_command" { - value = "az aks get-credentials --resource-group ${azurerm_resource_group.aks.name} 
--name ${module.aks.name} --overwrite-existing" + value = "az aks get-credentials --resource-group ${azurerm_resource_group.aks.name} --name ${azurerm_kubernetes_cluster.aks.name} --overwrite-existing" } output "aks_node_resource_group" { - value = module.aks.node_resource_group + value = azurerm_kubernetes_cluster.aks.node_resource_group } # output "ssh_private_key" { @@ -25,9 +25,9 @@ output "aks_node_resource_group" { # } # output "client_certificate" { -# value = module.aks.kube_config[0].client_certificate +# value = azurerm_kubernetes_cluster.aks.kube_config[0].client_certificate # } # output "kube_config" { -# value = module.aks.kube_config_raw +# value = azurerm_kubernetes_cluster.aks.kube_config_raw # } diff --git a/terraform/variables.tf b/terraform/variables.tf index f53f5813..6a819160 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -1,102 +1,100 @@ # Variables - #region Versions # version used for both main AKS API service, and default node pool # https://github.com/Azure/AKS/releases # az aks get-versions --location eastus --output table +# az aks get-versions --location uksouth --output tsv --query "orchestrators | [?default].orchestratorVersion" variable "kubernetes_version" { - default = "1.21.2" + default = "1.24.6" } # Helm charts -# Migrated to newer kubernetes nginx helm chart: -# https://github.com/kubernetes/ingress-nginx/tree/master/charts/ingress-nginx#migrating-from-stablenginx-ingress -# -# https://kubernetes.github.io/ingress-nginx/deploy/#using-helm # https://github.com/kubernetes/ingress-nginx/releases -# https://github.com/kubernetes/ingress-nginx/blob/ingress-nginx-3.11.0/charts/ingress-nginx/Chart.yaml#L3 -# # helm repo update # helm search repo ingress-nginx/ingress-nginx +# helm search repo -l ingress-nginx/ingress-nginx | head -5 variable "nginx_chart_version" { - default = "4.0.6" + default = "4.3.0" } # https://hub.helm.sh/charts/jetstack/cert-manager # helm search repo jetstack/cert-manager variable 
"cert_manager_chart_version" { - default = "v1.6.1" + default = "v1.11.0" } # https://github.com/vmware-tanzu/helm-charts/releases # helm search repo vmware-tanzu/velero +# * also update terraform/helm/velero_default_values.yaml # * also update terraform/helm/velero_values.yaml variable "velero_chart_version" { - default = "2.26.1" + default = "3.1.0" } # https://hub.docker.com/r/velero/velero/tags variable "velero_image_tag" { - default = "v1.7.0" + default = "v1.10.0" } # https://hub.docker.com/r/sonatype/nexus3/tags variable "nexus_image_tag" { - default = "3.36.0" + default = "3.45.1" } # https://github.com/adamrushuk/charts/releases # helm search repo adamrushuk/sonatype-nexus variable "nexus_chart_version" { - default = "0.2.8" + default = "0.3.1" } # https://github.com/SparebankenVest/azure-key-vault-to-kubernetes -# https://github.com/SparebankenVest/public-helm-charts/releases # https://github.com/SparebankenVest/helm-charts/tree/gh-pages/akv2k8s # https://github.com/SparebankenVest/public-helm-charts/blob/master/stable/akv2k8s/Chart.yaml#L5 # helm search repo spv-charts/akv2k8s variable "akv2k8s_chart_version" { - default = "2.1.0" + default = "2.3.2" } # https://github.com/Azure/aad-pod-identity/blob/master/charts/aad-pod-identity/Chart.yaml#L4 # helm search repo aad-pod-identity/aad-pod-identity variable "aad_pod_identity_chart_version" { - default = "4.1.6" + default = "4.1.15" } # https://bitnami.com/stack/external-dns/helm -# https://github.com/bitnami/charts/blob/master/bitnami/external-dns/Chart.yaml#L21 +# https://github.com/bitnami/charts/blob/master/bitnami/external-dns/Chart.yaml # helm search repo bitnami/external-dns +# helm search repo -l bitnami/external-dns variable "external_dns_chart_version" { - default = "5.4.8" + default = "6.13.1" } -# https://github.com/weaveworks/kured/tree/master/charts/kured -# helm search repo kured/kured +# https://github.com/kubereboot/charts/tree/main/charts/kured +# helm search repo kubereboot/kured variable 
"kured_chart_version" { - default = "2.10.0" + default = "4.2.0" } -# https://github.com/weaveworks/kured#kubernetes--os-compatibility +# https://kured.dev/docs/installation/#kubernetes--os-compatibility variable "kured_image_tag" { - default = "1.8.0" + default = "1.12.0" } # argo cd # https://github.com/argoproj/argo-helm/blob/master/charts/argo-cd/Chart.yaml#L5 # helm search repo argo/argo-cd +# helm search repo -l argo/argo-cd | head -n 20 +# * also update terraform/helm/argocd_default_values.yaml variable "argocd_chart_version" { - default = "3.26.3" + default = "5.19.11" } # https://hub.docker.com/r/argoproj/argocd/tags # * also update cli version: terraform/files/scripts/argocd_config.sh#L22 variable "argocd_image_tag" { - default = "v2.1.6" + default = "v2.5.9" } #endregion Versions @@ -161,44 +159,16 @@ variable "aks_admins_aad_group_name" { default = "AKS-Admins" } -variable "sla_sku" { - description = "Define the SLA under which the managed master control plane of AKS is running" - type = string - default = "Free" -} - variable "aks_container_insights_enabled" { description = "Should Container Insights monitoring be enabled" - default = false + default = true } variable "aks_config_path" { default = "./azurek8s_config" } - - # Agent Pool -variable "agent_pool_node_count" { - default = 1 -} - -variable "agent_pool_enable_auto_scaling" { - default = false -} - -variable "agent_pool_node_min_count" { - default = null -} - -variable "agent_pool_node_max_count" { - default = null -} - -variable "agent_pool_profile_name" { - default = "default" -} - variable "agent_pool_profile_vm_size" { # https://azureprice.net/?region=ukwest¤cy=GBP # Standard_D2s_v3 - ยฃ0.086455 per hour @@ -221,14 +191,6 @@ variable "agent_pool_profile_vm_size" { default = "Standard_D4s_v3" } -variable "agent_pool_profile_os_type" { - default = "Linux" -} - -variable "agent_pool_profile_disk_size_gb" { - default = 30 -} - # Velero @@ -237,10 +199,6 @@ variable "velero_enabled" { default = 
"__VELERO_ENABLED__" } -variable "velero_resource_group_name" { - default = "__VELERO_STORAGE_RG__" -} - variable "velero_storage_account_name" { default = "__VELERO_STORAGE_ACCOUNT__" } diff --git a/terraform/velero.tf b/terraform/velero.tf index f0dd9361..0fef0e38 100644 --- a/terraform/velero.tf +++ b/terraform/velero.tf @@ -2,36 +2,18 @@ # Prereqs # https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure/blob/master/README.md#Create-Azure-storage-account-and-blob-container -resource "azurerm_resource_group" "velero" { - count = var.velero_enabled ? 1 : 0 - name = var.velero_resource_group_name - location = var.location - tags = var.tags - - lifecycle { - ignore_changes = [ - tags - ] - } -} resource "azurerm_storage_account" "velero" { count = var.velero_enabled ? 1 : 0 name = var.velero_storage_account_name - resource_group_name = azurerm_resource_group.velero[0].name - location = azurerm_resource_group.velero[0].location + resource_group_name = azurerm_resource_group.aks.name + location = azurerm_resource_group.aks.location account_kind = "BlobStorage" account_tier = "Standard" account_replication_type = "LRS" enable_https_traffic_only = true - - tags = var.tags - - lifecycle { - ignore_changes = [ - tags - ] - } + min_tls_version = "TLS1_2" + tags = var.tags } resource "azurerm_storage_container" "velero" { @@ -52,7 +34,7 @@ resource "kubernetes_namespace" "velero" { delete = "15m" } - depends_on = [module.aks] + depends_on = [azurerm_kubernetes_cluster.aks] } resource "kubernetes_secret" "velero_credentials" { @@ -69,7 +51,7 @@ resource "kubernetes_secret" "velero_credentials" { data = { cloud = < + +## User Assigned Identity Example + +```bash +# vars +AKS_RESOURCE_GROUP='arshz-rg-aks-dev-001' +AKS_CLUSTER_NAME='arshz-aks-001' +LOCATION='eastus' + +# update aks creds +az aks get-credentials --resource-group "$AKS_RESOURCE_GROUP" --name "$AKS_CLUSTER_NAME" --overwrite-existing --admin + +# test kubectl +kubectl get node +kubectl get pod -A + +# 
Export environmental variables +export AKS_OIDC_ISSUER="$(az aks show --resource-group "$AKS_RESOURCE_GROUP" --name "$AKS_CLUSTER_NAME" --query "oidcIssuerProfile.issuerUrl" -otsv)" +echo $AKS_OIDC_ISSUER + +# environment variables for the Kubernetes Service account & federated identity credential +export SERVICE_ACCOUNT_NAMESPACE="wi-test" +export SERVICE_ACCOUNT_NAME="workload-identity-sa" + +# environment variables for the Federated Identity +export SUBSCRIPTION="$(az account show --query id --output tsv)" +# user assigned identity name +export UAID="fic-test-ua" +# federated identity name +export FICID="fic-test-fic-name" + + +# Create a managed identity and grant permissions to read from sub +az identity create --name "${UAID}" --resource-group "${AKS_RESOURCE_GROUP}" --location "${LOCATION}" --subscription "${SUBSCRIPTION}" + +export USER_ASSIGNED_CLIENT_ID="$(az identity show --resource-group "${AKS_RESOURCE_GROUP}" --name "${UAID}" --query 'clientId' -otsv)" +export USER_ASSIGNED_PRINCIPAL_ID="$(az identity show --resource-group "${AKS_RESOURCE_GROUP}" --name "${UAID}" --query 'principalId' -otsv)" + +# doesnt work using USER_ASSIGNED_CLIENT_ID +# az role assignment create --assignee-object-id "$USER_ASSIGNED_CLIENT_ID" --role "Reader" --subscription "${SUBSCRIPTION}" --assignee-principal-type 'ServicePrincipal' + +# TODO test +# az role assignment create --assignee-object-id "$USER_ASSIGNED_PRINCIPAL_ID" --role "Reader" --subscription "${SUBSCRIPTION}" --assignee-principal-type 'ServicePrincipal' +az role assignment create --assignee "$USER_ASSIGNED_PRINCIPAL_ID" --role "Reader" --subscription "${SUBSCRIPTION}" + + +# Create Kubernetes service account +kubectl create namespace ${SERVICE_ACCOUNT_NAMESPACE} + +cat <