diff --git a/.github/actions/build-vault/action.yml b/.github/actions/build-vault/action.yml
index d17c6448a009..8fc228415a02 100644
--- a/.github/actions/build-vault/action.yml
+++ b/.github/actions/build-vault/action.yml
@@ -137,20 +137,22 @@ runs:
run: make ci-build
- if: inputs.vault-edition != 'ce'
shell: bash
- run: make ci-prepare-legal
+ run: make ci-prepare-ent-legal
+ - if: inputs.vault-edition == 'ce'
+ shell: bash
+ run: make ci-prepare-ce-legal
- name: Bundle Vault
env:
BUNDLE_PATH: out/${{ steps.metadata.outputs.artifact-basename }}.zip
shell: bash
run: make ci-bundle
- # Use actions/upload-artifact @3.x until https://hashicorp.atlassian.net/browse/HREL-99 is resolved
- - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+ - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: ${{ steps.metadata.outputs.artifact-basename }}.zip
path: out/${{ steps.metadata.outputs.artifact-basename }}.zip
if-no-files-found: error
- if: inputs.create-packages == 'true'
- uses: hashicorp/actions-packaging-linux@v1
+ uses: hashicorp/actions-packaging-linux@33f7d23b14f24e6a7b7d9948cb7f5caca2045ee3
with:
name: ${{ inputs.package-name }}
description: Vault is a tool for secrets management, encryption as a service, and privileged access management.
@@ -176,15 +178,13 @@ runs:
echo "deb-files=$(basename out/*.deb)"
} | tee -a "$GITHUB_OUTPUT"
- if: inputs.create-packages == 'true'
- # Use actions/upload-artifact @3.x until https://hashicorp.atlassian.net/browse/HREL-99 is resolved
- uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+ uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: ${{ steps.package-files.outputs.rpm-files }}
path: out/${{ steps.package-files.outputs.rpm-files }}
if-no-files-found: error
- if: inputs.create-packages == 'true'
- # Use actions/upload-artifact @3.x until https://hashicorp.atlassian.net/browse/HREL-99 is resolved
- uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+ uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: ${{ steps.package-files.outputs.deb-files }}
path: out/${{ steps.package-files.outputs.deb-files }}
diff --git a/.github/actions/containerize/action.yml b/.github/actions/containerize/action.yml
index 91c67c9dfbc9..e269298e52b7 100644
--- a/.github/actions/containerize/action.yml
+++ b/.github/actions/containerize/action.yml
@@ -90,7 +90,7 @@ runs:
[[ ! -d "$dest_dir" ]] && mkdir -p "$dest_dir"
[[ ! -f "$dest_path" ]] && cp ${{ inputs.vault-binary-path }} "${dest_path}"
- if: inputs.docker == 'true'
- uses: hashicorp/actions-docker-build@v1
+ uses: hashicorp/actions-docker-build@v2
with:
arch: ${{ inputs.goarch }}
do_zip_extract_step: 'false' # Don't download and extract an already present binary
@@ -99,7 +99,7 @@ runs:
revision: ${{ steps.vars.outputs.revision }}
version: ${{ steps.vars.outputs.container-version }}
- if: inputs.redhat == 'true'
- uses: hashicorp/actions-docker-build@v1
+ uses: hashicorp/actions-docker-build@v2
with:
arch: ${{ inputs.goarch }}
do_zip_extract_step: 'false' # Don't download and extract an already present binary
diff --git a/.github/actions/install-external-tools/action.yml b/.github/actions/install-external-tools/action.yml
index 1b9b2babb5a8..152b6822ba09 100644
--- a/.github/actions/install-external-tools/action.yml
+++ b/.github/actions/install-external-tools/action.yml
@@ -22,7 +22,7 @@ runs:
# up here.
- run: go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
shell: bash
- - run: go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
+ - run: go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0
shell: bash
- run: go install github.com/favadi/protoc-go-inject-tag@latest
shell: bash
diff --git a/.github/actions/set-up-go/action.yml b/.github/actions/set-up-go/action.yml
index 1fde1ec50f49..9a80bf32f497 100644
--- a/.github/actions/set-up-go/action.yml
+++ b/.github/actions/set-up-go/action.yml
@@ -40,7 +40,7 @@ runs:
else
echo "go-version=${{ inputs.go-version }}" | tee -a "$GITHUB_OUTPUT"
fi
- - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
+ - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:
go-version: ${{ steps.go-version.outputs.go-version }}
cache: false # We use our own caching strategy
diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml
index b8dcf33c4ca0..38d2b167ff96 100644
--- a/.github/workflows/actionlint.yml
+++ b/.github/workflows/actionlint.yml
@@ -14,7 +14,7 @@ jobs:
actionlint:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: "Check workflow files"
uses: docker://docker.mirror.hashicorp.services/rhysd/actionlint@sha256:93834930f56ca380be3e9a3377670d7aa5921be251b9c774891a39b3629b83b8
with:
diff --git a/.github/workflows/build-artifacts-ce.yml b/.github/workflows/build-artifacts-ce.yml
index 5421cffc05ca..8e6233a4036b 100644
--- a/.github/workflows/build-artifacts-ce.yml
+++ b/.github/workflows/build-artifacts-ce.yml
@@ -98,7 +98,7 @@ jobs:
runs-on: ${{ fromJSON(inputs.compute-build) }}
name: (${{ matrix.goos }}, ${{ matrix.goarch }})
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ inputs.checkout-ref }}
- uses: ./.github/actions/build-vault
@@ -200,7 +200,7 @@ jobs:
name: (${{ matrix.goos }}, ${{ matrix.goarch }}${{ matrix.goarm && ' ' || '' }}${{ matrix.goarm }})
runs-on: ${{ fromJSON(inputs.compute-build) }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ inputs.checkout-ref }}
- uses: ./.github/actions/build-vault
@@ -228,7 +228,7 @@ jobs:
- core
- extended
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ inputs.checkout-ref }}
- name: Determine status
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index b876f2fa62fe..10c720ac2293 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -105,13 +105,13 @@ jobs:
workflow-trigger: ${{ steps.metadata.outputs.workflow-trigger }}
steps:
# Run the changed-files action to determine what Git reference we should check out
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: ./.github/actions/changed-files
id: changed-files
- uses: ./.github/actions/checkout
id: checkout # Make sure we check out correct ref after checking changed files
# Get the vault version metadata
- - uses: hashicorp/actions-set-product-version@v1
+ - uses: hashicorp/actions-set-product-version@v2
id: set-product-version
with:
checkout: false # don't override the reference we've checked out
@@ -159,7 +159,7 @@ jobs:
outputs:
cache-key: ui-${{ steps.ui-hash.outputs.ui-hash }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ needs.setup.outputs.checkout-ref }}
- name: Get UI hash
@@ -291,7 +291,7 @@ jobs:
- test
- test-containers
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- id: status
name: Determine status
run: |
@@ -312,7 +312,7 @@ jobs:
- if: needs.setup.outputs.is-enterprise == 'true'
id: secrets
name: Fetch Vault Secrets
- uses: hashicorp/vault-action@9f522b85981b491eab9a52c144d15aedbd0bf371 # v2.8.0
+ uses: hashicorp/vault-action@v3
with:
url: ${{ steps.vault-auth.outputs.addr }}
caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }}
@@ -343,8 +343,6 @@ jobs:
steps.status.outputs.result != 'success' &&
(github.ref_name == 'main' || startsWith(github.ref_name, 'release/'))
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0
- # We intentionally aren't using the following here since it's from an internal repo
- # uses: hashicorp/cloud-gha-slack-notifier@730a033037b8e603adf99ebd3085f0fdfe75e2f4 #v1
env:
SLACK_BOT_TOKEN: ${{ steps.slackbot-token.outputs.slackbot-token }}
with:
@@ -389,8 +387,7 @@ jobs:
with:
version: ${{ needs.setup.outputs.vault-version-metadata }}
product: ${{ needs.setup.outputs.vault-binary-name }}
- # Use actions/upload-artifact @3.x until https://hashicorp.atlassian.net/browse/HREL-99 is resolved
- - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+ - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: steps.generate-metadata-file.outcome == 'success' # upload our metadata if we created it
with:
name: metadata.json
diff --git a/.github/workflows/changelog-checker.yml b/.github/workflows/changelog-checker.yml
index ae1f61bfc298..034a8657ee28 100644
--- a/.github/workflows/changelog-checker.yml
+++ b/.github/workflows/changelog-checker.yml
@@ -18,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0 # by default the checkout action doesn't checkout all branches
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 669136c31998..cd3b8fe544e3 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -33,7 +33,7 @@ jobs:
ui-changed: ${{ steps.changed-files.outputs.ui-changed }}
workflow-trigger: ${{ steps.metadata.outputs.workflow-trigger }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: ./.github/actions/changed-files
id: changed-files
- uses: ./.github/actions/checkout
@@ -146,7 +146,7 @@ jobs:
contents: read
runs-on: ${{ fromJSON(needs.setup.outputs.compute-test-ui) }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
name: status
with:
ref: ${{ needs.setup.outputs.checkout-ref }}
@@ -164,7 +164,7 @@ jobs:
node-version-file: './ui/package.json'
cache: yarn
cache-dependency-path: ui/yarn.lock
- - uses: browser-actions/setup-chrome@82b9ce628cc5595478a9ebadc480958a36457dc2 # v1.6.0
+ - uses: browser-actions/setup-chrome@9683066f53b47e92c4104e1bd5535aff208c3530 # v1.6.2
- name: ui-dependencies
working-directory: ./ui
run: |
@@ -177,7 +177,7 @@ jobs:
- if: needs.setup.outputs.is-enterprise == 'true'
id: secrets
name: Fetch secrets
- uses: hashicorp/vault-action@9f522b85981b491eab9a52c144d15aedbd0bf371 # v2.8.0
+ uses: hashicorp/vault-action@v3
with:
url: ${{ steps.vault-auth.outputs.addr }}
caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }}
@@ -226,7 +226,7 @@ jobs:
runs-on: ${{ github.repository == 'hashicorp/vault' && 'ubuntu-latest' || fromJSON('["self-hosted","linux","small"]') }}
permissions: write-all # Ensure we have id-token:write access for vault-auth.
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
# Determine the overall status of our required test jobs.
- name: Determine status
id: status
@@ -268,7 +268,7 @@ jobs:
- if: needs.setup.outputs.is-enterprise == 'true'
id: secrets
name: Fetch Vault Secrets
- uses: hashicorp/vault-action@9f522b85981b491eab9a52c144d15aedbd0bf371 # v2.8.0
+ uses: hashicorp/vault-action@v3
with:
url: ${{ steps.vault-auth.outputs.addr }}
caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }}
@@ -290,8 +290,6 @@ jobs:
)
name: Notify build failures in Slack
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0
- # We intentionally aren't using the following here since it's from an internal repo
- # uses: hashicorp/cloud-gha-slack-notifier@730a033037b8e603adf99ebd3085f0fdfe75e2f4 #v1
env:
SLACK_BOT_TOKEN: ${{ steps.slackbot-token.outputs.slackbot-token }}
with:
diff --git a/.github/workflows/code-checker.yml b/.github/workflows/code-checker.yml
index 5342b7ed8562..c40e53d718bd 100644
--- a/.github/workflows/code-checker.yml
+++ b/.github/workflows/code-checker.yml
@@ -17,7 +17,7 @@ jobs:
name: Setup
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Ensure Go modules are cached
uses: ./.github/actions/set-up-go
with:
@@ -30,7 +30,7 @@ jobs:
needs: setup
if: github.base_ref == 'main'
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
fetch-depth: 0
- uses: ./.github/actions/set-up-go
@@ -46,7 +46,7 @@ jobs:
needs: setup
if: github.base_ref == 'main'
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
fetch-depth: 0
- uses: ./.github/actions/set-up-go
@@ -66,7 +66,7 @@ jobs:
runs-on: ubuntu-latest
needs: setup
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: ./.github/actions/set-up-go
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
@@ -79,7 +79,7 @@ jobs:
runs-on: ubuntu-latest
needs: setup
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: ./.github/actions/install-external-tools # for buf and gofumpt
- uses: ./.github/actions/set-up-go
with:
@@ -97,6 +97,6 @@ jobs:
container:
image: returntocorp/semgrep@sha256:cfad18cfb6536aa48ad5a71017207a10320b4e17e3b2bd7b7de27b42dc9651e7 #v1.58
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Run Semgrep Rules
run: semgrep ci --include '*.go' --config 'tools/semgrep/ci'
diff --git a/.github/workflows/copywrite.yml b/.github/workflows/copywrite.yml
index 6b5fe2bf3daf..8d978b5cba1d 100644
--- a/.github/workflows/copywrite.yml
+++ b/.github/workflows/copywrite.yml
@@ -12,8 +12,8 @@ jobs:
copywrite:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- - uses: hashicorp/setup-copywrite@867a1a2a064a0626db322392806428f7dc59cb3e # v1.1.2
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+ - uses: hashicorp/setup-copywrite@32638da2d4e81d56a0764aa1547882fc4d209636 # v1.1.3
name: Setup Copywrite
with:
version: v0.16.4
diff --git a/.github/workflows/enos-lint.yml b/.github/workflows/enos-lint.yml
index 509ebe3cdce1..39d4a620f377 100644
--- a/.github/workflows/enos-lint.yml
+++ b/.github/workflows/enos-lint.yml
@@ -17,9 +17,9 @@ jobs:
runs-on: ${{ steps.metadata.outputs.runs-on }}
version: ${{ steps.metadata.outputs.version }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- id: set-product-version
- uses: hashicorp/actions-set-product-version@v1
+ uses: hashicorp/actions-set-product-version@v2
- id: metadata
run: |
echo "version=${{ steps.set-product-version.outputs.product-version }}" >> "$GITHUB_OUTPUT"
@@ -37,7 +37,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: hashicorp/setup-terraform@v3
with:
terraform_wrapper: false
diff --git a/.github/workflows/enos-release-testing-oss.yml b/.github/workflows/enos-release-testing-oss.yml
index 31c3ad8d15ec..3ce6b6ab5372 100644
--- a/.github/workflows/enos-release-testing-oss.yml
+++ b/.github/workflows/enos-release-testing-oss.yml
@@ -15,7 +15,7 @@ jobs:
vault-version: ${{ github.event.client_payload.payload.version }}
vault-version-package: ${{ steps.get-metadata.outputs.vault-version-package }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
# Check out the repository at the same Git SHA that was used to create
# the artifacts to get the correct metadata.
@@ -69,4 +69,4 @@ jobs:
needs: test
steps:
- name: Persist metadata
- uses: hashicorp/actions-persist-metadata@v1
+ uses: hashicorp/actions-persist-metadata@v2
diff --git a/.github/workflows/enos-run-k8s.yml b/.github/workflows/enos-run-k8s.yml
index 8322c76ceba5..440eb87ad930 100644
--- a/.github/workflows/enos-run-k8s.yml
+++ b/.github/workflows/enos-run-k8s.yml
@@ -31,7 +31,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
steps:
- name: Checkout
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set up Terraform
uses: hashicorp/setup-terraform@v3
with:
@@ -45,8 +45,7 @@ jobs:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
- name: Download Docker Image
id: download
- # Use actions/download-artifact @3.x until https://hashicorp.atlassian.net/browse/HREL-99 is resolved
- uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
+ uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: ${{ inputs.artifact-name }}
path: ./enos/support/downloads
diff --git a/.github/workflows/oss.yml b/.github/workflows/oss.yml
index 01a61cf5f315..9dedca7fbc28 100644
--- a/.github/workflows/oss.yml
+++ b/.github/workflows/oss.yml
@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- if: github.event.pull_request != null
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- if: github.event.pull_request != null
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
id: changes
diff --git a/.github/workflows/plugin-update-check.yml b/.github/workflows/plugin-update-check.yml
index 437da47b92d4..c1a083af4c98 100644
--- a/.github/workflows/plugin-update-check.yml
+++ b/.github/workflows/plugin-update-check.yml
@@ -23,13 +23,13 @@ jobs:
RUN_ID: "${{github.run_id}}"
steps:
- run: echo "Branch $PLUGIN_BRANCH of $PLUGIN_REPO"
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
# We don't use the default token so that checks are executed on the resulting PR
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
- - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
+ - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:
cache: false # save cache space for vault builds: https://github.com/hashicorp/vault/pull/21764
go-version-file: .go-version
diff --git a/.github/workflows/plugin-update.yml b/.github/workflows/plugin-update.yml
index 3ac3b637905b..e2ea8c9ab0d3 100644
--- a/.github/workflows/plugin-update.yml
+++ b/.github/workflows/plugin-update.yml
@@ -19,13 +19,13 @@ jobs:
env:
VAULT_BRANCH: "update/${{ inputs.plugin }}/v${{ inputs.version }}"
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
# We don't use the default token so that checks are executed on the resulting PR
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
- - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
+ - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:
cache: false # save cache space for vault builds: https://github.com/hashicorp/vault/pull/21764
go-version-file: .go-version
diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml
index ace28d466844..9b8872e8aeb9 100644
--- a/.github/workflows/security-scan.yml
+++ b/.github/workflows/security-scan.yml
@@ -20,12 +20,12 @@ jobs:
# won't have the permissions to run this job.
if: ${{ (github.repository != 'hashicorp/vault' || (github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name))
&& (github.actor != 'dependabot[bot]') && ( github.actor != 'hc-github-team-secure-vault-core') }}
-
+
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set up Go
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
+ uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:
cache: false # save cache space for vault builds: https://github.com/hashicorp/vault/pull/21764
go-version-file: .go-version
@@ -36,7 +36,7 @@ jobs:
python-version: 3.x
- name: Clone Security Scanner repo
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
repository: hashicorp/security-scanner
token: ${{ secrets.HASHIBOT_PRODSEC_GITHUB_TOKEN }}
diff --git a/.github/workflows/stable-website.yaml b/.github/workflows/stable-website.yaml
index 838d82e88852..e18a02ffbbff 100644
--- a/.github/workflows/stable-website.yaml
+++ b/.github/workflows/stable-website.yaml
@@ -15,7 +15,7 @@ jobs:
name: Cherry pick to stable-website branch
steps:
- name: Checkout
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: stable-website
- run: |
diff --git a/.github/workflows/test-ci-bootstrap.yml b/.github/workflows/test-ci-bootstrap.yml
index 932cb9aa52fb..0a0222b56799 100644
--- a/.github/workflows/test-ci-bootstrap.yml
+++ b/.github/workflows/test-ci-bootstrap.yml
@@ -29,7 +29,7 @@ jobs:
TF_VAR_aws_ssh_public_key: ${{ secrets.SSH_KEY_PUBLIC_CI }}
TF_TOKEN_app_terraform_io: ${{ secrets.TF_API_TOKEN }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set up Terraform
uses: hashicorp/setup-terraform@v3
with:
diff --git a/.github/workflows/test-ci-cleanup.yml b/.github/workflows/test-ci-cleanup.yml
index 03289222d23a..c94d28fb4a53 100644
--- a/.github/workflows/test-ci-cleanup.yml
+++ b/.github/workflows/test-ci-cleanup.yml
@@ -49,7 +49,7 @@ jobs:
role-skip-session-tagging: true
role-duration-seconds: 3600
mask-aws-account-id: false
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Configure
run: |
cp enos/ci/aws-nuke.yml .
diff --git a/.github/workflows/test-enos-scenario-ui.yml b/.github/workflows/test-enos-scenario-ui.yml
index db0465da5750..40009f1d84cc 100644
--- a/.github/workflows/test-enos-scenario-ui.yml
+++ b/.github/workflows/test-enos-scenario-ui.yml
@@ -40,7 +40,7 @@ jobs:
runs-on: ${{ steps.get-metadata.outputs.runs-on }}
vault_edition: ${{ steps.get-metadata.outputs.vault_edition }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- id: get-metadata
env:
IS_ENT: ${{ startsWith(github.event.repository.name, 'vault-enterprise' ) }}
@@ -72,7 +72,7 @@ jobs:
GOPRIVATE: github.com/hashicorp
steps:
- name: Checkout
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: ./.github/actions/set-up-go
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
@@ -109,7 +109,7 @@ jobs:
sudo apt install -y libnss3-dev libgdk-pixbuf2.0-dev libgtk-3-dev libxss-dev libasound2
- name: Install Chrome
if: steps.chrome-check.outputs.chrome-version == 'not-installed'
- uses: browser-actions/setup-chrome@82b9ce628cc5595478a9ebadc480958a36457dc2 # v1.6.0
+ uses: browser-actions/setup-chrome@9683066f53b47e92c4104e1bd5535aff208c3530 # v1.6.2
- name: Installed Chrome Version
run: |
echo "Installed Chrome Version = [$(chrome --version 2> /dev/null || google-chrome --version 2> /dev/null || google-chrome-stable --version 2> /dev/null)]"
diff --git a/.github/workflows/test-go.yml b/.github/workflows/test-go.yml
index 5f74f42b3e56..d4d24923cb32 100644
--- a/.github/workflows/test-go.yml
+++ b/.github/workflows/test-go.yml
@@ -95,7 +95,7 @@ jobs:
matrix: ${{ steps.build.outputs.matrix }}
matrix_ids: ${{ steps.build.outputs.matrix_ids }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ inputs.checkout-ref }}
- uses: ./.github/actions/set-up-go
@@ -111,7 +111,7 @@ jobs:
- name: Fetch Secrets
id: secrets
if: github.repository == 'hashicorp/vault-enterprise'
- uses: hashicorp/vault-action@9f522b85981b491eab9a52c144d15aedbd0bf371 # v2.8.0
+ uses: hashicorp/vault-action@v3
with:
url: ${{ steps.vault-auth.outputs.addr }}
caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }}
@@ -230,7 +230,7 @@ jobs:
go-test-results-download-pattern: ${{ steps.metadata.outputs.go-test-results-download-pattern }}
data-race-log-download-pattern: ${{ steps.metadata.outputs.data-race-log-download-pattern }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ inputs.checkout-ref }}
- uses: ./.github/actions/set-up-go
@@ -285,7 +285,7 @@ jobs:
- name: Fetch Secrets
id: secrets
if: github.repository == 'hashicorp/vault-enterprise'
- uses: hashicorp/vault-action@9f522b85981b491eab9a52c144d15aedbd0bf371 # v2.8.0
+ uses: hashicorp/vault-action@v3
with:
url: ${{ steps.vault-auth.outputs.addr }}
caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }}
@@ -306,6 +306,15 @@ jobs:
run: |
git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com
- uses: ./.github/actions/install-external-tools
+ - name: Build Vault HSM binary for tests
+ if: inputs.binary-tests && matrix.id == inputs.total-runners && github.repository == 'hashicorp/vault-enterprise'
+ env:
+ GOPRIVATE: github.com/hashicorp/*
+ run: |
+ set -exo pipefail
+ time make prep enthsmdev
+ # The subsequent build of vault will blow away the bin folder
+ mv bin/vault vault-hsm-binary
- if: inputs.binary-tests && matrix.id == inputs.total-runners
name: Build dev binary for binary tests
# The dev mode binary has to exist for binary tests that are dispatched on the last runner.
@@ -400,6 +409,12 @@
# parallelism. The default if -p isn't specified is to use NumCPUs, which seems fine for regular tests.
package_parallelism=""
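+ # If the enterprise HSM build step above produced a binary, expose it to the Go tests via VAULT_HSM_BINARY.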
+ if [ -f vault-hsm-binary ]; then
+ VAULT_HSM_BINARY="$(pwd)/vault-hsm-binary"
+ export VAULT_HSM_BINARY
+ fi
+
if [ -f bin/vault ]; then
VAULT_BINARY="$(pwd)/bin/vault"
export VAULT_BINARY
diff --git a/.github/workflows/test-run-acc-tests-for-path.yml b/.github/workflows/test-run-acc-tests-for-path.yml
index 6c57d2a4627e..372647a1fe06 100644
--- a/.github/workflows/test-run-acc-tests-for-path.yml
+++ b/.github/workflows/test-run-acc-tests-for-path.yml
@@ -20,7 +20,7 @@ jobs:
go-test:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: ./.github/actions/set-up-go
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
diff --git a/.github/workflows/test-run-enos-scenario-matrix.yml b/.github/workflows/test-run-enos-scenario-matrix.yml
index 2954083080b3..15d80fad72a9 100644
--- a/.github/workflows/test-run-enos-scenario-matrix.yml
+++ b/.github/workflows/test-run-enos-scenario-matrix.yml
@@ -49,7 +49,7 @@ jobs:
sample: ${{ steps.metadata.outputs.sample }}
vault-version: ${{ steps.metadata.outputs.vault-version }}
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ inputs.vault-revision }}
- uses: hashicorp/action-setup-enos@v1
@@ -99,7 +99,7 @@ jobs:
ENOS_VAR_vault_license_path: ./support/vault.hclic
ENOS_DEBUG_DATA_ROOT_DIR: /tmp/enos-debug-data
steps:
- - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
ref: ${{ inputs.vault-revision }}
- uses: hashicorp/setup-terraform@v3
@@ -127,8 +127,7 @@ jobs:
chmod 600 "./enos/support/private_key.pem"
echo "debug_data_artifact_name=enos-debug-data_$(echo "${{ matrix.scenario }}" | sed -e 's/ /_/g' | sed -e 's/:/=/g')" >> "$GITHUB_OUTPUT"
- if: contains(inputs.sample-name, 'build')
- # Use actions/download-artifact @3.x until https://hashicorp.atlassian.net/browse/HREL-99 is resolved
- uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
+ uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
with:
name: ${{ inputs.build-artifact-name }}
path: ./enos/support/downloads
@@ -176,28 +175,28 @@ jobs:
# https://api.slack.com/apps/A05E31CH1LG/incoming-webhooks
- if: ${{ always() && ! cancelled() }}
name: Notify launch failed
- uses: hashicorp/actions-slack-status@v1
+ uses: hashicorp/actions-slack-status@v2
with:
failure-message: "enos scenario launch ${{ matrix.scenario.id.filter}} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
status: ${{ steps.launch.outcome }}
slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
- if: ${{ always() && ! cancelled() }}
name: Notify retry launch failed
- uses: hashicorp/actions-slack-status@v1
+ uses: hashicorp/actions-slack-status@v2
with:
failure-message: "retry enos scenario launch ${{ matrix.scenario.id.filter}} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
status: ${{ steps.launch_retry.outcome }}
slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
- if: ${{ always() && ! cancelled() }}
name: Notify destroy failed
- uses: hashicorp/actions-slack-status@v1
+ uses: hashicorp/actions-slack-status@v2
with:
failure-message: "enos scenario destroy ${{ matrix.scenario.id.filter}} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
status: ${{ steps.destroy.outcome }}
slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
- if: ${{ always() && ! cancelled() }}
name: Notify retry destroy failed
- uses: hashicorp/actions-slack-status@v1
+ uses: hashicorp/actions-slack-status@v2
with:
failure-message: "retry enos scenario destroy ${{ matrix.scenario.id.filter}} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
status: ${{ steps.destroy_retry.outcome }}
diff --git a/Dockerfile b/Dockerfile
index 3799f778dd2c..62860b7efa6c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -24,7 +24,8 @@ LABEL name="Vault" \
summary="Vault is a tool for securely accessing secrets." \
description="Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log."
-COPY LICENSE /licenses/mozilla.txt
+# Copy the license file as required by Legal
+COPY LICENSE /licenses/LICENSE.txt
# Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD
ENV NAME=$NAME
@@ -95,7 +96,8 @@ LABEL name="Vault" \
summary="Vault is a tool for securely accessing secrets." \
description="Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log."
-COPY LICENSE /licenses/mozilla.txt
+# Copy the license file as required by Legal
+COPY LICENSE /licenses/LICENSE.txt
# Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD
ENV NAME=$NAME
diff --git a/Makefile b/Makefile
index fb7f5798c137..d16a56c1bf75 100644
--- a/Makefile
+++ b/Makefile
@@ -363,9 +363,13 @@ ci-get-version-package:
ci-install-external-tools:
@$(CURDIR)/scripts/ci-helper.sh install-external-tools
-.PHONY: ci-prepare-legal
-ci-prepare-legal:
- @$(CURDIR)/scripts/ci-helper.sh prepare-legal
+.PHONY: ci-prepare-ent-legal
+ci-prepare-ent-legal:
+ @$(CURDIR)/scripts/ci-helper.sh prepare-ent-legal
+
+.PHONY: ci-prepare-ce-legal
+ci-prepare-ce-legal:
+ @$(CURDIR)/scripts/ci-helper.sh prepare-ce-legal
.PHONY: ci-update-external-tool-modules
ci-update-external-tool-modules:
diff --git a/api/go.mod b/api/go.mod
index e1df62b71a2d..7f75b48d068f 100644
--- a/api/go.mod
+++ b/api/go.mod
@@ -22,7 +22,7 @@ require (
github.com/hashicorp/hcl v1.0.0
github.com/mitchellh/mapstructure v1.5.0
github.com/stretchr/testify v1.8.4
- golang.org/x/net v0.17.0
+ golang.org/x/net v0.25.0
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1
)
@@ -35,8 +35,8 @@ require (
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
- golang.org/x/crypto v0.19.0 // indirect
- golang.org/x/sys v0.17.0 // indirect
- golang.org/x/text v0.14.0 // indirect
+ golang.org/x/crypto v0.23.0 // indirect
+ golang.org/x/sys v0.20.0 // indirect
+ golang.org/x/text v0.15.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/api/go.sum b/api/go.sum
index 01bdbd9e6ea1..452fc5c7e17d 100644
--- a/api/go.sum
+++ b/api/go.sum
@@ -70,8 +70,9 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -79,8 +80,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
-golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -96,8 +97,9 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -108,8 +110,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/api/lifetime_watcher.go b/api/lifetime_watcher.go
index 7070445cc02a..4bc1390b93af 100644
--- a/api/lifetime_watcher.go
+++ b/api/lifetime_watcher.go
@@ -6,6 +6,7 @@ package api
import (
"errors"
"math/rand"
+ "strings"
"sync"
"time"
@@ -289,12 +290,18 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool,
switch {
case nonRenewable || r.renewBehavior == RenewBehaviorRenewDisabled:
// Can't or won't renew, just keep the same expiration so we exit
- // when it's reauthentication time
+ // when it's re-authentication time
remainingLeaseDuration = fallbackLeaseDuration
default:
// Renew the token
renewal, err = renew(credString, r.increment)
+ if err != nil && strings.Contains(err.Error(), "permission denied") {
+ // We can't renew since the token doesn't have permission to. Fall back
+ // to the code path for non-renewable tokens.
+ nonRenewable = true
+ continue
+ }
if err != nil || renewal == nil || (tokenMode && renewal.Auth == nil) {
if r.renewBehavior == RenewBehaviorErrorOnErrors {
if err != nil {
diff --git a/api/renewer_test.go b/api/renewer_test.go
index 7ba16e66eca2..1c9a5d03e2d2 100644
--- a/api/renewer_test.go
+++ b/api/renewer_test.go
@@ -177,6 +177,20 @@ func TestLifetimeWatcher(t *testing.T) {
expectError: nil,
expectRenewal: true,
},
+ {
+ maxTestTime: time.Second,
+ name: "permission_denied_error",
+ leaseDurationSeconds: 60,
+ incrementSeconds: 10,
+ // This should cause the lifetime watcher to behave just
+ // like a non-renewable secret, i.e. wait until its lifetime
+ // then be done.
+ renew: func(_ string, _ int) (*Secret, error) {
+ return nil, fmt.Errorf("permission denied")
+ },
+ expectError: nil,
+ expectRenewal: false,
+ },
}
for _, tc := range cases {
@@ -204,7 +218,9 @@ func TestLifetimeWatcher(t *testing.T) {
for {
select {
case <-time.After(tc.maxTestTime):
- t.Fatalf("renewal didn't happen")
+ if tc.expectRenewal || tc.expectError != nil {
+ t.Fatalf("expected error or renewal, and neither happened")
+ }
case r := <-v.RenewCh():
if !tc.expectRenewal {
t.Fatal("expected no renewals")
diff --git a/builtin/credential/aws/path_config_identity.go b/builtin/credential/aws/path_config_identity.go
index 0c6f8c3398ec..eb3ef6e5339e 100644
--- a/builtin/credential/aws/path_config_identity.go
+++ b/builtin/credential/aws/path_config_identity.go
@@ -66,7 +66,7 @@ func (b *backend) pathConfigIdentity() *framework.Path {
"iam_alias": {
Type: framework.TypeString,
Default: identityAliasIAMUniqueID,
- Description: fmt.Sprintf("Configure how the AWS auth method generates entity aliases when using IAM auth. Valid values are %q, %q, and %q. Defaults to %q.", identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn, identityAliasRoleID),
+ Description: fmt.Sprintf("Configure how the AWS auth method generates entity aliases when using IAM auth. Valid values are %q, %q, %q, and %q. Defaults to %q.", identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn, identityAliasIAMCanonicalArn, identityAliasRoleID),
},
iamAuthMetadataFields.FieldName: authmetadata.FieldSchema(iamAuthMetadataFields),
"ec2_alias": {
@@ -150,7 +150,7 @@ func pathConfigIdentityUpdate(ctx context.Context, req *logical.Request, data *f
iamAliasRaw, ok := data.GetOk("iam_alias")
if ok {
iamAlias := iamAliasRaw.(string)
- allowedIAMAliasValues := []string{identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn}
+ allowedIAMAliasValues := []string{identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn, identityAliasIAMCanonicalArn}
if !strutil.StrListContains(allowedIAMAliasValues, iamAlias) {
return logical.ErrorResponse(fmt.Sprintf("iam_alias of %q not in set of allowed values: %v", iamAlias, allowedIAMAliasValues)), nil
}
@@ -194,11 +194,12 @@ type identityConfig struct {
}
const (
- identityAliasIAMUniqueID = "unique_id"
- identityAliasIAMFullArn = "full_arn"
- identityAliasEC2InstanceID = "instance_id"
- identityAliasEC2ImageID = "image_id"
- identityAliasRoleID = "role_id"
+ identityAliasIAMUniqueID = "unique_id"
+ identityAliasIAMFullArn = "full_arn"
+ identityAliasIAMCanonicalArn = "canonical_arn"
+ identityAliasEC2InstanceID = "instance_id"
+ identityAliasEC2ImageID = "image_id"
+ identityAliasRoleID = "role_id"
)
const pathConfigIdentityHelpSyn = `
diff --git a/builtin/credential/aws/path_login.go b/builtin/credential/aws/path_login.go
index b66146d1ee67..e3d31229fc74 100644
--- a/builtin/credential/aws/path_login.go
+++ b/builtin/credential/aws/path_login.go
@@ -1397,6 +1397,9 @@
identityAlias = callerUniqueId
case identityAliasIAMFullArn:
identityAlias = callerID.Arn
+ case identityAliasIAMCanonicalArn:
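+ // Use the underlying IAM role/user ARN, i.e. the caller ARN with any assumed-role session name stripped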
+ identityAlias = entity.canonicalArn()
}
// If we're just looking up for MFA, return the Alias info
diff --git a/builtin/logical/pki/ca_util.go b/builtin/logical/pki/ca_util.go
index 2006684889ff..4ad1887853f7 100644
--- a/builtin/logical/pki/ca_util.go
+++ b/builtin/logical/pki/ca_util.go
@@ -237,7 +237,7 @@ func getKeyTypeAndBitsFromPublicKeyForRole(pubKey crypto.PublicKey) (certutil.Pr
keyBits = certutil.GetPublicKeySize(pubKey)
case *ecdsa.PublicKey:
keyType = certutil.ECPrivateKey
- case *ed25519.PublicKey:
+ case ed25519.PublicKey:
keyType = certutil.Ed25519PrivateKey
default:
return certutil.UnknownPrivateKey, 0, fmt.Errorf("unsupported public key: %#v", pubKey)
diff --git a/builtin/logical/pki/ca_util_test.go b/builtin/logical/pki/ca_util_test.go
new file mode 100644
index 000000000000..d4ef64e68fe1
--- /dev/null
+++ b/builtin/logical/pki/ca_util_test.go
@@ -0,0 +1,82 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "testing"
+
+ "github.com/hashicorp/vault/sdk/helper/certutil"
+)
+
+func TestGetKeyTypeAndBitsFromPublicKeyForRole(t *testing.T) {
+ rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Fatalf("error generating rsa key: %s", err)
+ }
+
+ ecdsaKey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+ if err != nil {
+ t.Fatalf("error generating ecdsa key: %s", err)
+ }
+
+ publicKey, _, err := ed25519.GenerateKey(rand.Reader)
+ if err != nil {
+ t.Fatalf("error generating ed25519 key: %s", err)
+ }
+
+ testCases := map[string]struct {
+ publicKey crypto.PublicKey
+ expectedKeyType certutil.PrivateKeyType
+ expectedKeyBits int
+ expectError bool
+ }{
+ "rsa": {
+ publicKey: rsaKey.Public(),
+ expectedKeyType: certutil.RSAPrivateKey,
+ expectedKeyBits: 2048,
+ },
+ "ecdsa": {
+ publicKey: ecdsaKey.Public(),
+ expectedKeyType: certutil.ECPrivateKey,
+ expectedKeyBits: 0,
+ },
+ "ed25519": {
+ publicKey: publicKey,
+ expectedKeyType: certutil.Ed25519PrivateKey,
+ expectedKeyBits: 0,
+ },
+ "bad key type": {
+ publicKey: []byte{},
+ expectedKeyType: certutil.UnknownPrivateKey,
+ expectedKeyBits: 0,
+ expectError: true,
+ },
+ }
+
+ for name, tt := range testCases {
+ t.Run(name, func(t *testing.T) {
+ keyType, keyBits, err := getKeyTypeAndBitsFromPublicKeyForRole(tt.publicKey)
+ if err != nil && !tt.expectError {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if err == nil && tt.expectError {
+ t.Fatal("expected error, got nil")
+ }
+
+ if keyType != tt.expectedKeyType {
+ t.Fatalf("key type mismatch: expected %s, got %s", tt.expectedKeyType, keyType)
+ }
+
+ if keyBits != tt.expectedKeyBits {
+ t.Fatalf("key bits mismatch: expected %d, got %d", tt.expectedKeyBits, keyBits)
+ }
+ })
+ }
+}
diff --git a/builtin/logical/transit/path_hmac.go b/builtin/logical/transit/path_hmac.go
index 0465b8dfa2be..f71c9516ea5f 100644
--- a/builtin/logical/transit/path_hmac.go
+++ b/builtin/logical/transit/path_hmac.go
@@ -257,7 +257,20 @@
name := d.Get("name").(string)
algorithm := d.Get("urlalgorithm").(string)
if algorithm == "" {
- algorithm = d.Get("algorithm").(string)
+ hashAlgorithmRaw, hasHashAlgorithm := d.GetOk("hash_algorithm")
+ algorithmRaw, hasAlgorithm := d.GetOk("algorithm")
+
+ // As `algorithm` is deprecated, make sure we only read it if
+ // `hash_algorithm` is not present.
+ switch {
+ case hasHashAlgorithm:
+ algorithm = hashAlgorithmRaw.(string)
+ case hasAlgorithm:
+ algorithm = algorithmRaw.(string)
+ default:
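+ // Neither parameter was supplied explicitly; d.Get returns the schema default for hash_algorithm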
+ algorithm = d.Get("hash_algorithm").(string)
+ }
}
// Get the policy
diff --git a/builtin/logical/transit/path_hmac_test.go b/builtin/logical/transit/path_hmac_test.go
index 4fa0fbce318c..3f21106c4cc9 100644
--- a/builtin/logical/transit/path_hmac_test.go
+++ b/builtin/logical/transit/path_hmac_test.go
@@ -94,17 +94,40 @@ func TestTransit_HMAC(t *testing.T) {
}
// Now verify
+ verify := func() {
+ t.Helper()
+
+ resp, err = b.HandleRequest(context.Background(), req)
+ if err != nil {
+ t.Fatalf("%v: %v", err, resp)
+ }
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if errStr, ok := resp.Data["error"]; ok {
+ t.Fatalf("error validating hmac: %s", errStr)
+ }
+ if resp.Data["valid"].(bool) == false {
+ t.Fatalf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp)
+ }
+ }
req.Path = strings.ReplaceAll(req.Path, "hmac", "verify")
req.Data["hmac"] = value.(string)
- resp, err = b.HandleRequest(context.Background(), req)
- if err != nil {
- t.Fatalf("%v: %v", err, resp)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.Data["valid"].(bool) == false {
- panic(fmt.Sprintf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp))
+ verify()
+
+ // If the `algorithm` parameter is used, try with `hash_algorithm` as well
+ if algorithm, ok := req.Data["algorithm"]; ok {
+ // Note that `hash_algorithm` takes precedence over `algorithm`, since the
+ // latter is deprecated.
+ req.Data["hash_algorithm"] = algorithm
+ req.Data["algorithm"] = "xxx"
+ defer func() {
+ // Restore the req fields, since req is re-used by the tests below
+ delete(req.Data, "hash_algorithm")
+ req.Data["algorithm"] = algorithm
+ }()
+
+ verify()
}
}
diff --git a/changelog/26844.txt b/changelog/26844.txt
new file mode 100644
index 000000000000..49f7bf2f1611
--- /dev/null
+++ b/changelog/26844.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+auto-auth: Addressed an issue where a renewable token without permission to renew itself caused auto-auth to attempt renewal constantly with no backoff
+```
diff --git a/changelog/26876.txt b/changelog/26876.txt
new file mode 100644
index 000000000000..6522b0ecd9a6
--- /dev/null
+++ b/changelog/26876.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+core: Add missing field delegated_auth_accessors to GET /sys/mounts/:path API response
+```
diff --git a/changelog/26890.txt b/changelog/26890.txt
new file mode 100644
index 000000000000..74d06a9cf781
--- /dev/null
+++ b/changelog/26890.txt
@@ -0,0 +1,3 @@
+```release-note:change
+auth/jwt: Update plugin to v0.20.3
+```
diff --git a/changelog/26896.txt b/changelog/26896.txt
new file mode 100644
index 000000000000..6147953d0b27
--- /dev/null
+++ b/changelog/26896.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+secrets/azure: Update vault-plugin-secrets-azure to 0.17.2 to include a bug fix for Azure role creation
+```
diff --git a/changelog/26985.txt b/changelog/26985.txt
new file mode 100644
index 000000000000..7894bd3d407d
--- /dev/null
+++ b/changelog/26985.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Prevent perpetual loading screen when Vault needs initialization
+```
diff --git a/changelog/26993.txt b/changelog/26993.txt
new file mode 100644
index 000000000000..35acaa79a8ad
--- /dev/null
+++ b/changelog/26993.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: Update PGP display and show error for Generate Operation Token flow with PGP
+```
\ No newline at end of file
diff --git a/changelog/27014.txt b/changelog/27014.txt
new file mode 100644
index 000000000000..94f6ebbe075a
--- /dev/null
+++ b/changelog/27014.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+core: Address a data race updating a seal's last seen healthy time attribute
+```
diff --git a/changelog/27019.txt b/changelog/27019.txt
new file mode 100644
index 000000000000..722e0d46c9ec
--- /dev/null
+++ b/changelog/27019.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fix link to v2 generic secrets engine from secrets list page.
+```
\ No newline at end of file
diff --git a/changelog/27093.txt b/changelog/27093.txt
new file mode 100644
index 000000000000..a24becec3eac
--- /dev/null
+++ b/changelog/27093.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+pki: Fix error in cross-signing using ed25519 keys
+```
diff --git a/changelog/27094.txt b/changelog/27094.txt
new file mode 100644
index 000000000000..9cd743f55f94
--- /dev/null
+++ b/changelog/27094.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fix KVv2 json editor to allow null values.
+```
\ No newline at end of file
diff --git a/changelog/27120.txt b/changelog/27120.txt
new file mode 100644
index 000000000000..3a9630b986c5
--- /dev/null
+++ b/changelog/27120.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fix KVv2 cursor jumping inside json editor after initial input.
+```
\ No newline at end of file
diff --git a/changelog/27178.txt b/changelog/27178.txt
new file mode 100644
index 000000000000..c84c67f34e27
--- /dev/null
+++ b/changelog/27178.txt
@@ -0,0 +1,3 @@
+```release-note:change
+ui/kubernetes: Update the roles filter-input to use explicit search.
+```
diff --git a/changelog/27184.txt b/changelog/27184.txt
new file mode 100644
index 000000000000..500045efb5af
--- /dev/null
+++ b/changelog/27184.txt
@@ -0,0 +1,3 @@
+```release-note:change
+core/identity: Improve performance for secondary nodes receiving identity-related updates through replication
+```
diff --git a/changelog/27211.txt b/changelog/27211.txt
new file mode 100644
index 000000000000..26bf725ebff3
--- /dev/null
+++ b/changelog/27211.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+secrets/transit: Use the 'hash_algorithm' parameter if present in HMAC verify requests. Otherwise, fall back to the deprecated 'algorithm' parameter.
+```
diff --git a/command/agent_test.go b/command/agent_test.go
index c87885680f7e..ddef97f5eac1 100644
--- a/command/agent_test.go
+++ b/command/agent_test.go
@@ -27,6 +27,8 @@ import (
vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt"
logicalKv "github.com/hashicorp/vault-plugin-secrets-kv"
"github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/audit"
+ auditFile "github.com/hashicorp/vault/builtin/audit/file"
credAppRole "github.com/hashicorp/vault/builtin/credential/approle"
"github.com/hashicorp/vault/command/agent"
agentConfig "github.com/hashicorp/vault/command/agent/config"
@@ -3105,6 +3107,150 @@
}
}
+// TestAgent_TokenRenewal tests that the LifetimeWatcher does not make
+// many renewal attempts if the token's policy does not allow it to renew
+// itself. Prior to a bug fix in the PR that added this test, this would have resulted
+// in hundreds of token renewal requests with no backoff.
+func TestAgent_TokenRenewal(t *testing.T) {
+ logger := logging.NewVaultLogger(hclog.Trace)
+ coreConfig := &vault.CoreConfig{
+ AuditBackends: map[string]audit.Factory{
+ "file": auditFile.Factory,
+ },
+ }
+
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ serverClient := cluster.Cores[0].Client
+
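+ // Enable a file audit device so we can count renew-self requests at the end of the test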
+ auditLogFileName := makeTempFile(t, "audit-log", "")
+ err := serverClient.Sys().EnableAuditWithOptions("file-audit-for-TestAgent_TokenRenewal", &api.EnableAuditOptions{
+ Type: "file",
+ Options: map[string]string{
+ "file_path": auditLogFileName,
+ },
+ })
+ require.NoError(t, err)
+
+ // Unset the environment variable so that the agent picks up the right test
+ // cluster address
+ defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
+ os.Unsetenv(api.EnvVaultAddress)
+
+ policyName := "less-than-default"
+ // Has a subset of the default policy's permissions
+ // Specifically removing renew-self.
+ err = serverClient.Sys().PutPolicy(policyName, `
+path "auth/token/lookup-self" {
+ capabilities = ["read"]
+}
+
+# Allow tokens to revoke themselves
+path "auth/token/revoke-self" {
+ capabilities = ["update"]
+}
+
+# Allow a token to look up its own capabilities on a path
+path "sys/capabilities-self" {
+ capabilities = ["update"]
+}
+`)
+ require.NoError(t, err)
+
+ renewable := true
+ // Make the token renewable, but don't grant it permission
+ // to renew itself (the policy above omits renew-self)
+ tokenCreateRequest := &api.TokenCreateRequest{
+ Policies: []string{policyName},
+ TTL: "10s",
+ Renewable: &renewable,
+ NoDefaultPolicy: true,
+ }
+
+ secret, err := serverClient.Auth().Token().CreateOrphan(tokenCreateRequest)
+ require.NoError(t, err)
+ lowPermissionToken := secret.Auth.ClientToken
+
+ tokenFileName := makeTempFile(t, "token-file", lowPermissionToken)
+
+ sinkFileName := makeTempFile(t, "sink-file", "")
+
+ autoAuthConfig := fmt.Sprintf(`
+auto_auth {
+ method {
+ type = "token_file"
+ config = {
+ token_file_path = "%s"
+ }
+ }
+
+ sink "file" {
+ config = {
+ path = "%s"
+ }
+ }
+}`, tokenFileName, sinkFileName)
+
+ config := fmt.Sprintf(`
+vault {
+ address = "%s"
+ tls_skip_verify = true
+}
+
+log_level = "trace"
+
+%s
+`, serverClient.Address(), autoAuthConfig)
+ configPath := makeTempFile(t, "config.hcl", config)
+
+ // Start the agent
+ ui, cmd := testAgentCommand(t, logger)
+
+ cmd.startedCh = make(chan struct{})
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ cmd.Run([]string{"-config", configPath})
+ wg.Done()
+ }()
+
+ select {
+ case <-cmd.startedCh:
+ case <-time.After(5 * time.Second):
+ t.Errorf("timeout")
+ t.Errorf("stdout: %s", ui.OutputWriter.String())
+ t.Errorf("stderr: %s", ui.ErrorWriter.String())
+ }
+
+ // Sleep to allow the renewal/auth process to work, and to ensure that it
+ // doesn't go crazy with renewals.
+ time.Sleep(30 * time.Second)
+
+ fileBytes, err := os.ReadFile(auditLogFileName)
+ require.NoError(t, err)
+ stringAudit := string(fileBytes)
+
+ // This is a bit of an imperfect way to test things, but we want to make sure
+ // that a token like this doesn't keep triggering retries.
+ // Due to the fact this is an auto-auth specific thing, unit tests for the
+ // LifetimeWatcher wouldn't be sufficient here.
+	// Prior to the fix made in the same PR that added this test, it would
+	// trigger many, many retries (hundreds to thousands in less than a minute).
+	// We really want to make sure that doesn't happen.
+ numberOfRenewSelves := strings.Count(stringAudit, "auth/token/renew-self")
+	// We actually expect ~6, but add some buffer for CI slowness. The count can also
+	// vary due to the grace period added to or removed from the sleep in the LifetimeWatcher.
+ if numberOfRenewSelves > 10 {
+ t.Fatalf("did too many renews -- Vault received %d renew-self requests", numberOfRenewSelves)
+ }
+}
+
// TestAgent_Logging_ConsulTemplate attempts to ensure two things about Vault Agent logs:
// 1. When -log-format command line arg is set to JSON, it is honored as the output format
// for messages generated from within the consul-template library.
diff --git a/command/agentproxyshared/auth/auth.go b/command/agentproxyshared/auth/auth.go
index 0017acd34cfc..afc71d110da1 100644
--- a/command/agentproxyshared/auth/auth.go
+++ b/command/agentproxyshared/auth/auth.go
@@ -7,6 +7,7 @@ import (
"context"
"encoding/json"
"errors"
+ "fmt"
"math"
"math/rand"
"net/http"
@@ -467,10 +468,9 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
}
metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "success"}, 1)
- // We don't want to trigger the renewal process for tokens with
- // unlimited TTL, such as the root token.
- if leaseDuration == 0 && isTokenFileMethod {
- ah.logger.Info("not starting token renewal process, as token has unlimited TTL")
+ // We don't want to trigger the renewal process for the root token
+ if isRootToken(leaseDuration, isTokenFileMethod, secret) {
+ ah.logger.Info("not starting token renewal process, as token is root token")
} else {
ah.logger.Info("starting renewal process")
go watcher.Renew()
@@ -485,11 +485,31 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
break LifetimeWatcherLoop
case err := <-watcher.DoneCh():
- ah.logger.Info("lifetime watcher done channel triggered")
+ ah.logger.Info("lifetime watcher done channel triggered, re-authenticating")
if err != nil {
+ ah.logger.Error("error renewing token", "error", err, "backoff", backoffCfg)
metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1)
- ah.logger.Error("error renewing token", "error", err)
+
+ // Add some exponential backoff so that if auth is successful
+ // but the watcher errors, we won't go into an immediate
+ // aggressive retry loop.
+ // This might be quite a small sleep, since if we have a successful
+ // auth, we reset the backoff. Still, some backoff is important, and
+ // ensuring we follow the normal flow is important:
+ // auth -> try to renew
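+				// backoffSleep sleeps for the current backoff duration and
+				// reports whether another retry is allowed; it returns false
+				// once the configured max retries have been exhausted.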
+ if !backoffSleep(ctx, backoffCfg) {
+ // We're at max retries. Return an error.
+ return fmt.Errorf("exceeded max retries failing to renew auth token")
+ }
+ }
+
+ // If the lease duration is 0, wait a second before re-authenticating
+ // so that we don't go into a loop, as the LifetimeWatcher will immediately
+ // return for tokens like this.
+ if leaseDuration == 0 {
+ time.Sleep(1 * time.Second)
}
+
break LifetimeWatcherLoop
case <-watcher.RenewCh():
@@ -504,6 +524,24 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
}
}
+// isRootToken checks if the secret in the argument is the root token.
+// This is determinable without leaseDuration and isTokenFileMethod,
+// but those make it easier to rule out other tokens cheaply.
+func isRootToken(leaseDuration int, isTokenFileMethod bool, secret *api.Secret) bool {
+	if secret == nil {
+		return false
+	}
+	// These checks are cheaper than parsing the policies, so do them first.
+	if leaseDuration == 0 && isTokenFileMethod && !secret.Renewable {
+		policies, err := secret.TokenPolicies()
+		if err == nil && len(policies) == 1 && policies[0] == "root" {
+			return true
+		}
+	}
+	return false
+}
+
// autoAuthBackoff tracks exponential backoff state.
type autoAuthBackoff struct {
backoff *backoff.Backoff
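
For context on the retry behavior introduced above, here is a minimal, self-contained sketch of capped exponential backoff with jitter. It is illustrative only: the `retryBackoff` type and its `next` method are hypothetical stand-ins, not the agent's actual `autoAuthBackoff`/`backoffSleep` implementation.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryBackoff is a hypothetical stand-in for the agent's backoff state:
// each failure roughly doubles the wait, jitter keeps many agents from
// retrying in lockstep, and the wait is capped at max.
type retryBackoff struct {
	min, max, current time.Duration
}

func (b *retryBackoff) next() time.Duration {
	if b.current == 0 {
		b.current = b.min
	}
	// Apply up to +/-25% jitter around the current duration.
	d := b.current - b.current/4 + time.Duration(rand.Int63n(int64(b.current)/2))
	// Double the base wait for next time, capped at max.
	b.current *= 2
	if b.current > b.max {
		b.current = b.max
	}
	return d
}

func main() {
	b := &retryBackoff{min: time.Second, max: 30 * time.Second}
	for i := 0; i < 5; i++ {
		fmt.Println(b.next()) // roughly 1s, 2s, 4s, 8s, 16s (with jitter)
	}
}
```

A successful authentication would reset the backoff to its minimum, which is why the comment above notes the post-auth sleep can be quite small.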
diff --git a/command/debug.go b/command/debug.go
index 09df88fb4d60..e81bc30d1edc 100644
--- a/command/debug.go
+++ b/command/debug.go
@@ -4,10 +4,12 @@
package command
import (
+ "archive/tar"
+ "compress/gzip"
"context"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/url"
"os"
"path/filepath"
@@ -26,7 +28,6 @@ import (
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/version"
- "github.com/mholt/archiver/v3"
"github.com/oklog/run"
"github.com/posener/complete"
)
@@ -374,7 +375,7 @@ func (c *DebugCommand) generateIndex() error {
}
// Write out file
- if err := ioutil.WriteFile(filepath.Join(c.flagOutput, "index.json"), bytes, 0o600); err != nil {
+ if err := os.WriteFile(filepath.Join(c.flagOutput, "index.json"), bytes, 0o600); err != nil {
return fmt.Errorf("error generating index file; %s", err)
}
@@ -777,7 +778,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) {
return
}
- err = ioutil.WriteFile(filepath.Join(dirName, target+".prof"), data, 0o600)
+ err = os.WriteFile(filepath.Join(dirName, target+".prof"), data, 0o600)
if err != nil {
c.captureError("pprof."+target, err)
}
@@ -795,13 +796,13 @@ func (c *DebugCommand) collectPprof(ctx context.Context) {
return
}
- err = ioutil.WriteFile(filepath.Join(dirName, "goroutines.txt"), data, 0o600)
+ err = os.WriteFile(filepath.Join(dirName, "goroutines.txt"), data, 0o600)
if err != nil {
c.captureError("pprof.goroutines-text", err)
}
}()
- // If the our remaining duration is less than the interval value
+ // If our remaining duration is less than the interval value
// skip profile and trace.
runDuration := currentTimestamp.Sub(startTime)
if (c.flagDuration+debugDurationGrace)-runDuration < c.flagInterval {
@@ -819,7 +820,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) {
return
}
- err = ioutil.WriteFile(filepath.Join(dirName, "profile.prof"), data, 0o600)
+ err = os.WriteFile(filepath.Join(dirName, "profile.prof"), data, 0o600)
if err != nil {
c.captureError("pprof.profile", err)
}
@@ -835,7 +836,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) {
return
}
- err = ioutil.WriteFile(filepath.Join(dirName, "trace.out"), data, 0o600)
+ err = os.WriteFile(filepath.Join(dirName, "trace.out"), data, 0o600)
if err != nil {
c.captureError("pprof.trace", err)
}
@@ -971,7 +972,7 @@ func (c *DebugCommand) persistCollection(collection []map[string]interface{}, ou
if err != nil {
return err
}
- if err := ioutil.WriteFile(filepath.Join(c.flagOutput, outFile), bytes, 0o600); err != nil {
+ if err := os.WriteFile(filepath.Join(c.flagOutput, outFile), bytes, 0o600); err != nil {
return err
}
@@ -983,14 +984,100 @@ func (c *DebugCommand) compress(dst string) error {
defer osutil.Umask(osutil.Umask(0o077))
}
- tgz := archiver.NewTarGz()
- if err := tgz.Archive([]string{c.flagOutput}, dst); err != nil {
- return fmt.Errorf("failed to compress data: %s", err)
+ if err := archiveToTgz(c.flagOutput, dst); err != nil {
+ return fmt.Errorf("failed to compress data: %w", err)
}
// If everything is fine up to this point, remove original directory
if err := os.RemoveAll(c.flagOutput); err != nil {
- return fmt.Errorf("failed to remove data directory: %s", err)
+ return fmt.Errorf("failed to remove data directory: %w", err)
+ }
+
+ return nil
+}
+
+// archiveToTgz compresses all the files in sourceDir into a
+// gzipped tarball at destination.
+func archiveToTgz(sourceDir, destination string) error {
+ file, err := os.Create(destination)
+ if err != nil {
+ return fmt.Errorf("failed to create file: %w", err)
+ }
+ defer file.Close()
+
+ gzipWriter := gzip.NewWriter(file)
+ defer gzipWriter.Close()
+
+ tarWriter := tar.NewWriter(gzipWriter)
+ defer tarWriter.Close()
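+	// Deferred Closes run LIFO, so the tar writer is closed (flushing the
+	// tar footer) before the gzip writer, which is closed before the file.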
+
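+	// Note: filepath.Walk visits sourceDir itself first, so the archive also
+	// contains a header for the base directory (which addFileToTar writes
+	// with a trailing slash).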
+ err = filepath.Walk(sourceDir,
+ func(filePath string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ return addFileToTar(sourceDir, filePath, tarWriter)
+ })
+
+ return err
+}
+
+// addFileToTar takes a file at filePath and adds it to the tar
+// being written to by tarWriter, alongside its header.
+// The tar header name will be relative. Example: If we're tarring
+// a file in ~/a/b/c/foo/bar.json, the header name will be foo/bar.json
+func addFileToTar(sourceDir, filePath string, tarWriter *tar.Writer) error {
+ file, err := os.Open(filePath)
+ if err != nil {
+ return fmt.Errorf("failed to open file %q: %w", filePath, err)
+ }
+ defer file.Close()
+
+ stat, err := file.Stat()
+ if err != nil {
+ return fmt.Errorf("failed to stat file %q: %w", filePath, err)
+ }
+
+ var link string
+ mode := stat.Mode()
+ if mode&os.ModeSymlink != 0 {
+ if link, err = os.Readlink(filePath); err != nil {
+ return fmt.Errorf("failed to read symlink for file %q: %w", filePath, err)
+ }
+ }
+ tarHeader, err := tar.FileInfoHeader(stat, link)
+ if err != nil {
+ return fmt.Errorf("failed to create tar header for file %q: %w", filePath, err)
+ }
+
+ // The tar header name should be relative, so remove the sourceDir from it,
+ // but preserve the last directory name.
+ // Example: If we're tarring a file in ~/a/b/c/foo/bar.json
+ // The name should be foo/bar.json
+ sourceDirExceptLastDir := filepath.Dir(sourceDir)
+ headerName := strings.TrimPrefix(filepath.Clean(filePath), filepath.Clean(sourceDirExceptLastDir)+"/")
+
+ // Directories should end with a slash.
+ if stat.IsDir() && !strings.HasSuffix(headerName, "/") {
+ headerName += "/"
+ }
+ tarHeader.Name = headerName
+
+ err = tarWriter.WriteHeader(tarHeader)
+ if err != nil {
+ return fmt.Errorf("failed to write tar header for file %q: %w", filePath, err)
+ }
+
+	// If it's not a regular file (e.g. a link or directory) we shouldn't
+	// copy its contents. The body of a tar entry (i.e. what the io.Copy
+	// call below writes) is only required for entries of TypeReg.
+ if tarHeader.Typeflag != tar.TypeReg {
+ return nil
+ }
+
+ _, err = io.Copy(tarWriter, file)
+ if err != nil {
+ return fmt.Errorf("failed to copy file %q into tarball: %w", filePath, err)
}
return nil
@@ -1007,7 +1094,7 @@ func pprofTarget(ctx context.Context, client *api.Client, target string, params
}
defer resp.Body.Close()
- data, err := ioutil.ReadAll(resp.Body)
+ data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
@@ -1027,7 +1114,7 @@ func pprofProfile(ctx context.Context, client *api.Client, duration time.Duratio
}
defer resp.Body.Close()
- data, err := ioutil.ReadAll(resp.Body)
+ data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
@@ -1047,7 +1134,7 @@ func pprofTrace(ctx context.Context, client *api.Client, duration time.Duration)
}
defer resp.Body.Close()
- data, err := ioutil.ReadAll(resp.Body)
+ data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
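
As a quick usage note, the bundles written by archiveToTgz can be inspected with nothing but the Go standard library. A minimal sketch (the bundle path is a placeholder):

```go
package main

import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// Placeholder path: point this at any bundle produced by `vault debug`.
	f, err := os.Open("vault-debug.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	gz, err := gzip.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	defer gz.Close()

	tr := tar.NewReader(gz)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		// Directory entries keep their trailing slash; files are paths
		// relative to the bundle's base directory, e.g. "vault-debug/index.json".
		fmt.Println(hdr.Name)
	}
}
```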
diff --git a/command/debug_test.go b/command/debug_test.go
index 279c48f0a5ac..16d297bf920f 100644
--- a/command/debug_test.go
+++ b/command/debug_test.go
@@ -5,9 +5,10 @@ package command
import (
"archive/tar"
+ "compress/gzip"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"os"
"path/filepath"
"runtime"
@@ -18,7 +19,7 @@ import (
"github.com/hashicorp/cli"
"github.com/hashicorp/vault/api"
- "github.com/mholt/archiver/v3"
+ "github.com/stretchr/testify/require"
)
func testDebugCommand(tb testing.TB) (*cli.MockUi, *DebugCommand) {
@@ -35,11 +36,7 @@ func testDebugCommand(tb testing.TB) (*cli.MockUi, *DebugCommand) {
func TestDebugCommand_Run(t *testing.T) {
t.Parallel()
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
cases := []struct {
name string
@@ -104,6 +101,54 @@ func TestDebugCommand_Run(t *testing.T) {
}
}
+// expectHeaderNamesInTarGzFile asserts that every name in expectedHeaderNames
+// appears as a header name in the tar.gz file at tarballPath, and fails the
+// test if any are missing.
+// Unless ignoreUnexpectedHeaders is true, it also fails on any file header
+// in the archive that is not in expectedHeaderNames.
+func expectHeaderNamesInTarGzFile(t *testing.T, tarballPath string, expectedHeaderNames []string, ignoreUnexpectedHeaders bool) {
+ t.Helper()
+
+	file, err := os.Open(tarballPath)
+	require.NoError(t, err)
+	defer file.Close()
+
+	uncompressedStream, err := gzip.NewReader(file)
+	require.NoError(t, err)
+	defer uncompressedStream.Close()
+
+ tarReader := tar.NewReader(uncompressedStream)
+ headersFoundMap := make(map[string]any)
+
+ for {
+ header, err := tarReader.Next()
+ if err == io.EOF {
+ // We're at the end of the tar.
+ break
+ }
+ require.NoError(t, err)
+
+ // Ignore directories.
+ if header.Typeflag == tar.TypeDir {
+ continue
+ }
+
+ for _, name := range expectedHeaderNames {
+ if header.Name == name {
+ headersFoundMap[header.Name] = struct{}{}
+ }
+ }
+ if _, ok := headersFoundMap[header.Name]; !ok && !ignoreUnexpectedHeaders {
+ t.Fatalf("unexpected file: %s", header.Name)
+ }
+ }
+
+ // Expect that every expectedHeader was found at some point
+ for _, name := range expectedHeaderNames {
+ if _, ok := headersFoundMap[name]; !ok {
+ t.Fatalf("missing header from tar: %s", name)
+ }
+ }
+}
+
func TestDebugCommand_Archive(t *testing.T) {
t.Parallel()
@@ -137,11 +182,7 @@ func TestDebugCommand_Archive(t *testing.T) {
// Create temp dirs for each test case since os.Stat and tgz.Walk
// (called down below) exhibits raciness otherwise.
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -177,32 +218,14 @@ func TestDebugCommand_Archive(t *testing.T) {
}
bundlePath := filepath.Join(testDir, basePath+expectedExt)
- _, err = os.Stat(bundlePath)
+ _, err := os.Stat(bundlePath)
if os.IsNotExist(err) {
t.Log(ui.OutputWriter.String())
t.Fatal(err)
}
- tgz := archiver.NewTarGz()
- err = tgz.Walk(bundlePath, func(f archiver.File) error {
- fh, ok := f.Header.(*tar.Header)
- if !ok {
- return fmt.Errorf("invalid file header: %#v", f.Header)
- }
-
- // Ignore base directory and index file
- if fh.Name == basePath+"/" || fh.Name == filepath.Join(basePath, "index.json") {
- return nil
- }
-
- if fh.Name != filepath.Join(basePath, "server_status.json") {
- return fmt.Errorf("unexpected file: %s", fh.Name)
- }
- return nil
- })
- if err != nil {
- t.Fatal(err)
- }
+ expectedHeaders := []string{filepath.Join(basePath, "index.json"), filepath.Join(basePath, "server_status.json")}
+ expectHeaderNamesInTarGzFile(t, bundlePath, expectedHeaders, false)
})
}
}
@@ -258,11 +281,7 @@ func TestDebugCommand_CaptureTargets(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -287,45 +306,22 @@ func TestDebugCommand_CaptureTargets(t *testing.T) {
}
bundlePath := filepath.Join(testDir, basePath+debugCompressionExt)
- _, err = os.Open(bundlePath)
+ _, err := os.Open(bundlePath)
if err != nil {
t.Fatalf("failed to open archive: %s", err)
}
- tgz := archiver.NewTarGz()
- err = tgz.Walk(bundlePath, func(f archiver.File) error {
- fh, ok := f.Header.(*tar.Header)
- if !ok {
- t.Fatalf("invalid file header: %#v", f.Header)
- }
-
- // Ignore base directory and index file
- if fh.Name == basePath+"/" || fh.Name == filepath.Join(basePath, "index.json") {
- return nil
- }
-
- for _, fileName := range tc.expectedFiles {
- if fh.Name == filepath.Join(basePath, fileName) {
- return nil
- }
- }
-
- // If we reach here, it means that this is an unexpected file
- return fmt.Errorf("unexpected file: %s", fh.Name)
- })
- if err != nil {
- t.Fatal(err)
+ expectedHeaders := []string{filepath.Join(basePath, "index.json")}
+ for _, fileName := range tc.expectedFiles {
+ expectedHeaders = append(expectedHeaders, filepath.Join(basePath, fileName))
}
+ expectHeaderNamesInTarGzFile(t, bundlePath, expectedHeaders, false)
})
}
}
func TestDebugCommand_Pprof(t *testing.T) {
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -379,11 +375,7 @@ func TestDebugCommand_Pprof(t *testing.T) {
func TestDebugCommand_IndexFile(t *testing.T) {
t.Parallel()
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -409,7 +401,7 @@ func TestDebugCommand_IndexFile(t *testing.T) {
t.Fatalf("expected %d to be %d", code, exp)
}
- content, err := ioutil.ReadFile(filepath.Join(outputPath, "index.json"))
+ content, err := os.ReadFile(filepath.Join(outputPath, "index.json"))
if err != nil {
t.Fatal(err)
}
@@ -426,11 +418,7 @@ func TestDebugCommand_IndexFile(t *testing.T) {
func TestDebugCommand_TimingChecks(t *testing.T) {
t.Parallel()
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
cases := []struct {
name string
@@ -585,11 +573,7 @@ func TestDebugCommand_OutputExists(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -602,12 +586,12 @@ func TestDebugCommand_OutputExists(t *testing.T) {
// Create a conflicting file/directory
if tc.compress {
- _, err = os.Create(outputPath)
+ _, err := os.Create(outputPath)
if err != nil {
t.Fatal(err)
}
} else {
- err = os.Mkdir(outputPath, 0o700)
+ err := os.Mkdir(outputPath, 0o700)
if err != nil {
t.Fatal(err)
}
@@ -639,11 +623,7 @@ func TestDebugCommand_OutputExists(t *testing.T) {
func TestDebugCommand_PartialPermissions(t *testing.T) {
t.Parallel()
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -680,38 +660,14 @@ func TestDebugCommand_PartialPermissions(t *testing.T) {
t.Fatalf("failed to open archive: %s", err)
}
- tgz := archiver.NewTarGz()
- err = tgz.Walk(bundlePath, func(f archiver.File) error {
- fh, ok := f.Header.(*tar.Header)
- if !ok {
- t.Fatalf("invalid file header: %#v", f.Header)
- }
-
- // Ignore base directory and index file
- if fh.Name == basePath+"/" {
- return nil
- }
-
- // Ignore directories, which still get created by pprof but should
- // otherwise be empty.
- if fh.FileInfo().IsDir() {
- return nil
- }
-
- switch {
- case fh.Name == filepath.Join(basePath, "index.json"):
- case fh.Name == filepath.Join(basePath, "replication_status.json"):
- case fh.Name == filepath.Join(basePath, "server_status.json"):
- case fh.Name == filepath.Join(basePath, "vault.log"):
- default:
- return fmt.Errorf("unexpected file: %s", fh.Name)
- }
-
- return nil
- })
- if err != nil {
- t.Fatal(err)
+ expectedHeaders := []string{
+ filepath.Join(basePath, "index.json"), filepath.Join(basePath, "server_status.json"),
+ filepath.Join(basePath, "vault.log"),
}
+
+	// We set ignoreUnexpectedHeaders to true, as replication_status.json is only
+	// sometimes produced. Relying on its presence or absence would be racy.
+ expectHeaderNamesInTarGzFile(t, bundlePath, expectedHeaders, true)
}
// set insecure umask to see if the files and directories get created with right permissions
@@ -748,11 +704,7 @@ func TestDebugCommand_InsecureUmask(t *testing.T) {
// set insecure umask
defer syscall.Umask(syscall.Umask(0))
- testDir, err := ioutil.TempDir("", "vault-debug")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(testDir)
+ testDir := t.TempDir()
client, closer := testVaultServer(t)
defer closer()
@@ -796,20 +748,22 @@ func TestDebugCommand_InsecureUmask(t *testing.T) {
// check permissions of the files within the parent directory
switch tc.compress {
case true:
- tgz := archiver.NewTarGz()
+ file, err := os.Open(bundlePath)
+ require.NoError(t, err)
- err = tgz.Walk(bundlePath, func(f archiver.File) error {
- fh, ok := f.Header.(*tar.Header)
- if !ok {
- return fmt.Errorf("invalid file header: %#v", f.Header)
- }
- err = isValidFilePermissions(fh.FileInfo())
- if err != nil {
- t.Fatalf(err.Error())
- }
- return nil
- })
+ uncompressedStream, err := gzip.NewReader(file)
+ require.NoError(t, err)
+
+ tarReader := tar.NewReader(uncompressedStream)
+ for {
+ header, err := tarReader.Next()
+ if err == io.EOF {
+ break
+			}
+			// Fail on any non-EOF read error before dereferencing the header.
+			require.NoError(t, err)
+
+			err = isValidFilePermissions(header.FileInfo())
+ require.NoError(t, err)
+ }
case false:
err = filepath.Walk(bundlePath, func(path string, info os.FileInfo, err error) error {
err = isValidFilePermissions(info)
@@ -820,9 +774,7 @@ func TestDebugCommand_InsecureUmask(t *testing.T) {
})
}
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
})
}
}
diff --git a/command/secrets_enable.go b/command/secrets_enable.go
index d02bd69d459f..a73a5e49ef87 100644
--- a/command/secrets_enable.go
+++ b/command/secrets_enable.go
@@ -41,6 +41,7 @@ type SecretsEnableCommand struct {
flagExternalEntropyAccess bool
flagVersion int
flagAllowedManagedKeys []string
+ flagDelegatedAuthAccessors []string
flagIdentityTokenKey string
}
@@ -229,6 +230,14 @@ func (c *SecretsEnableCommand) Flags() *FlagSets {
"each time with 1 key.",
})
+ f.StringSliceVar(&StringSliceVar{
+ Name: flagNameDelegatedAuthAccessors,
+ Target: &c.flagDelegatedAuthAccessors,
+ Usage: "A list of permitted authentication accessors this backend can delegate authentication to. " +
+ "Note that multiple values may be specified by providing this option multiple times, " +
+ "each time with 1 accessor.",
+ })
+
f.StringVar(&StringVar{
Name: flagNameIdentityTokenKey,
Target: &c.flagIdentityTokenKey,
@@ -339,6 +348,10 @@ func (c *SecretsEnableCommand) Run(args []string) int {
mountInput.Config.AllowedManagedKeys = c.flagAllowedManagedKeys
}
+ if fl.Name == flagNameDelegatedAuthAccessors {
+ mountInput.Config.DelegatedAuthAccessors = c.flagDelegatedAuthAccessors
+ }
+
if fl.Name == flagNamePluginVersion {
mountInput.Config.PluginVersion = c.flagPluginVersion
}
diff --git a/command/secrets_enable_test.go b/command/secrets_enable_test.go
index 3d6766b53e35..3efc171a7be1 100644
--- a/command/secrets_enable_test.go
+++ b/command/secrets_enable_test.go
@@ -119,6 +119,7 @@ func TestSecretsEnableCommand_Run(t *testing.T) {
"-allowed-response-headers", "authorization",
"-allowed-managed-keys", "key1,key2",
"-identity-token-key", "default",
+ "-delegated-auth-accessors", "authAcc1,authAcc2",
"-force-no-cache",
"pki",
})
@@ -171,6 +172,9 @@ func TestSecretsEnableCommand_Run(t *testing.T) {
if diff := deep.Equal([]string{"key1,key2"}, mountInfo.Config.AllowedManagedKeys); len(diff) > 0 {
t.Errorf("Failed to find expected values in AllowedManagedKeys. Difference is: %v", diff)
}
+ if diff := deep.Equal([]string{"authAcc1,authAcc2"}, mountInfo.Config.DelegatedAuthAccessors); len(diff) > 0 {
+ t.Errorf("Failed to find expected values in DelegatedAuthAccessors. Difference is: %v", diff)
+ }
if diff := deep.Equal("default", mountInfo.Config.IdentityTokenKey); len(diff) > 0 {
t.Errorf("Failed to find expected values in IdentityTokenKey. Difference is: %v", diff)
}
diff --git a/command/secrets_tune_test.go b/command/secrets_tune_test.go
index 5bd70a0f0deb..b2d932779fd8 100644
--- a/command/secrets_tune_test.go
+++ b/command/secrets_tune_test.go
@@ -195,6 +195,7 @@ func TestSecretsTuneCommand_Run(t *testing.T) {
"-identity-token-key", "default",
"-listing-visibility", "unauth",
"-plugin-version", version,
+ "-delegated-auth-accessors", "authAcc1,authAcc2",
"mount_tune_integration/",
})
if exp := 0; code != exp {
@@ -246,6 +247,9 @@ func TestSecretsTuneCommand_Run(t *testing.T) {
if diff := deep.Equal([]string{"key1,key2"}, mountInfo.Config.AllowedManagedKeys); len(diff) > 0 {
t.Errorf("Failed to find expected values in AllowedManagedKeys. Difference is: %v", diff)
}
+ if diff := deep.Equal([]string{"authAcc1,authAcc2"}, mountInfo.Config.DelegatedAuthAccessors); len(diff) > 0 {
+ t.Errorf("Failed to find expected values in DelegatedAuthAccessors. Difference is: %v", diff)
+ }
if diff := deep.Equal("default", mountInfo.Config.IdentityTokenKey); len(diff) > 0 {
t.Errorf("Failed to find expected values in IdentityTokenKey. Difference is: %v", diff)
}
diff --git a/enos/README.md b/enos/README.md
index a33f4abe1b9e..1ec6b8e13d4b 100644
--- a/enos/README.md
+++ b/enos/README.md
@@ -18,34 +18,35 @@ is going to give you faster feedback and execution time, whereas Enos is going
to give you a real-world execution and validation of the requirement. Consider
the following cases as examples of when one might opt for an Enos scenario:
-* The feature require third-party integrations. Whether that be networked
+- The feature requires third-party integrations. Whether that be networked
dependencies like a real Consul backend, a real KMS key to test awskms
auto-unseal, auto-join discovery using AWS tags, or Cloud hardware KMS's.
-* The feature might behave differently under multiple configuration variants
+- The feature might behave differently under multiple configuration variants
and therefore should be tested with both combinations, e.g. auto-unseal and
manual shamir unseal or replication in HA mode with integrated storage or
Consul storage.
-* The scenario requires coordination between multiple targets. For example,
+- The scenario requires coordination between multiple targets. For example,
consider the complex lifecycle event of migrating the seal type or storage,
or manually triggering a raft disaster scenario by partitioning the network
between the leader and follower nodes. Or perhaps an auto-pilot upgrade between
a stable version of Vault and our candidate version.
-* The scenario has specific deployment strategy requirements. For example,
+- The scenario has specific deployment strategy requirements. For example,
if we want to add a regression test for an issue that only arises when the
software is deployed in a certain manner.
-* The scenario needs to use actual build artifacts that will be promoted
+- The scenario needs to use actual build artifacts that will be promoted
through the pipeline.
## Requirements
-* AWS access. HashiCorp Vault developers should use Doormat.
-* Terraform >= 1.2
-* Enos >= v0.0.10. You can [install it from a release channel](https://github.com/hashicorp/Enos-Docs/blob/main/installation.md).
-* Access to the QTI org in Terraform Cloud. HashiCorp Vault developers can
- access a shared token in 1Password or request their own in #team-quality on
- Slack.
-* An SSH keypair in the AWS region you wish to run the scenario. You can use
+- AWS access. HashiCorp Vault developers should use Doormat.
+- Terraform >= 1.7
+- Enos >= v0.0.28. You can [download a release](https://github.com/hashicorp/enos/releases/) or
+ install it with Homebrew:
+ ```shell
+ brew tap hashicorp/tap && brew update && brew install hashicorp/tap/enos
+ ```
+- An SSH keypair in the AWS region you wish to run the scenario. You can use
Doormat to log in to the AWS console to create or upload an existing keypair.
-* A Vault artifact is downloaded from the GHA artifacts when using the `artifact_source:crt` variants, from Artifactory when using `artifact_source:artifactory`, and is built locally from the current branch when using `artifact_source:local` variant.
+- A Vault artifact is downloaded from the GHA artifacts when using the `artifact_source:crt` variants, from Artifactory when using `artifact_source:artifactory`, and built locally from the current branch when using the `artifact_source:local` variant.
## Scenario Variables
In CI, each scenario is executed via Github Actions and has been configured using
@@ -57,7 +58,6 @@ variables, or you can update `enos.vars.hcl` with values and uncomment the lines
Variables that are required:
* `aws_ssh_keypair_name`
* `aws_ssh_private_key_path`
-* `tfc_api_token`
* `vault_bundle_path`
* `vault_license_path` (only required for non-OSS editions)
@@ -206,7 +206,6 @@ This variant is for running the Enos scenario to test an artifact from Artifacto
* `artifactory_token`
* `aws_ssh_keypair_name`
* `aws_ssh_private_key_path`
-* `tfc_api_token`
* `vault_product_version`
* `vault_revision`
@@ -234,7 +233,6 @@ and destroyed each time a scenario is run, the Terraform state will be managed b
Here are the steps to configure the GitHub Actions service user:
#### Pre-requisites
-- Access to the `hashicorp-qti` organization in Terraform Cloud.
- Full access to the CI AWS account is required.
**Notes:**
diff --git a/enos/enos-dev-scenario-pr-replication.hcl b/enos/enos-dev-scenario-pr-replication.hcl
new file mode 100644
index 000000000000..54aaa6e6bae9
--- /dev/null
+++ b/enos/enos-dev-scenario-pr-replication.hcl
@@ -0,0 +1,911 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+scenario "dev_pr_replication" {
+ description = <<-EOF
+    This scenario spins up two Vault clusters with either an external Consul cluster or
+ integrated Raft for storage. The secondary cluster is configured with performance replication
+ from the primary cluster. None of our test verification is included in this scenario in order
+    to improve end-to-end speed. If you wish to perform such verification you'll need to use a non-dev
+ scenario.
+
+ The scenario supports finding and installing any released 'linux/amd64' or 'linux/arm64' Vault
+ artifact as long as its version is >= 1.8. You can also use the 'artifact:local' variant to
+ build and deploy the current branch!
+
+ In order to execute this scenario you'll need to install the enos CLI:
+ brew tap hashicorp/tap && brew update && brew install hashicorp/tap/enos
+
+ You'll also need access to an AWS account with an SSH keypair.
+ Perform the steps here to get AWS access with Doormat https://eng-handbook.hashicorp.services/internal-tools/enos/common-setup-steps/#authenticate-with-doormat
+ Perform the steps here to get an AWS keypair set up: https://eng-handbook.hashicorp.services/internal-tools/enos/common-setup-steps/#set-your-aws-key-pair-name-and-private-key
+
+    Please note that this scenario requires several input variables to be set in order to function
+ properly. While not all variants will require all variables, it's suggested that you look over
+ the scenario outline to determine which variables affect which steps and which have inputs that
+ you should set. You can use the following command to get a textual outline of the entire
+ scenario:
+ enos scenario outline dev_pr_replication
+
+ You can also create an HTML version that is suitable for viewing in web browsers:
+ enos scenario outline dev_pr_replication --format html > index.html
+ open index.html
+
+ To configure the required variables you have a couple of choices. You can create an
+ 'enos-local.vars' file in the same 'enos' directory where this scenario is defined. In it you
+ declare your desired variable values. For example, you could copy the following content and
+ then set the values as necessary:
+
+ artifactory_username = "username@hashicorp.com"
+      artifactory_token = ""
+ aws_region = "us-west-2"
+ aws_ssh_keypair_name = ""
+ aws_ssh_keypair_key_path = "/path/to/your/private/key.pem"
+ dev_build_local_ui = false
+ dev_consul_version = "1.18.1"
+ vault_license_path = "./support/vault.hclic"
+ vault_product_version = "1.16.2"
+
+ Alternatively, you can set them in your environment:
+ export ENOS_VAR_aws_region="us-west-2"
+ export ENOS_VAR_vault_license_path="./support/vault.hclic"
+
+ After you've configured your inputs you can list and filter the available scenarios and then
+ subsequently launch and destroy them.
+ enos scenario list --help
+ enos scenario launch --help
+ enos scenario list dev_pr_replication
+ enos scenario launch dev_pr_replication arch:amd64 artifact:deb distro:ubuntu edition:ent.hsm primary_backend:raft primary_seal:awskms secondary_backend:raft secondary_seal:pkcs11
+
+    When the scenario has finished launching, you can refer to the scenario outputs to see information
+ related to your cluster. You can use this information to SSH into nodes and/or to interact
+ with vault.
+ enos scenario output dev_pr_replication arch:amd64 artifact:deb distro:ubuntu edition:ent.hsm primary_backend:raft primary_seal:awskms secondary_backend:raft secondary_seal:pkcs11
+      ssh -i /path/to/your/private/key.pem <ssh user>@<node public IP>
+ vault status
+
+ After you've finished you can tear down the cluster
+ enos scenario destroy dev_pr_replication arch:amd64 artifact:deb distro:ubuntu edition:ent.hsm primary_backend:raft primary_seal:awskms secondary_backend:raft secondary_seal:pkcs11
+ EOF
+
+ // The matrix is where we define all the baseline combinations that enos can utilize to customize
+ // your scenario. By default enos attempts to perform your command an the entire product! Most
+  // your scenario. By default enos attempts to perform your command across the entire matrix! Most
+ // Run 'enos scenario list --help' to see more about how filtering scenarios works in enos.
+ matrix {
+ arch = ["amd64", "arm64"]
+ artifact = ["local", "deb", "rpm", "zip"]
+ distro = ["ubuntu", "rhel"]
+ edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
+ primary_backend = ["consul", "raft"]
+ primary_seal = ["awskms", "pkcs11", "shamir"]
+ secondary_backend = ["consul", "raft"]
+ secondary_seal = ["awskms", "pkcs11", "shamir"]
+
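+    // The HSM and FIPS 140-2 editions aren't published for arm64, so drop
+    // those combinations from the matrix.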
+ exclude {
+ edition = ["ent.hsm", "ent.fips1402", "ent.hsm.fips1402"]
+ arch = ["arm64"]
+ }
+
+ exclude {
+ artifact = ["rpm"]
+ distro = ["ubuntu"]
+ }
+
+ exclude {
+ artifact = ["deb"]
+ distro = ["rhel"]
+ }
+
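+    // PKCS#11 seals require an HSM-capable edition, so exclude them for
+    // all other editions.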
+ exclude {
+ primary_seal = ["pkcs11"]
+ edition = ["ce", "ent", "ent.fips1402"]
+ }
+
+ exclude {
+ secondary_seal = ["pkcs11"]
+ edition = ["ce", "ent", "ent.fips1402"]
+ }
+ }
+
+ // Specify which Terraform configs and providers to use in this scenario. Most of the time you'll
+ // never need to change this! If you wanted to test with different terraform or terraform CLI
+ // settings you can define them and assign them here.
+ terraform_cli = terraform_cli.default
+ terraform = terraform.default
+
+ // Here we declare all of the providers that we might need for our scenario.
+ providers = [
+ provider.aws.default,
+ provider.enos.ubuntu,
+ provider.enos.rhel
+ ]
+
+ // These are variable values that are local to our scenario. They are evaluated after external
+ // variables and scenario matrices but before any of our steps.
+ locals {
+ // The enos provider uses different ssh transport configs for different distros (as
+ // specified in enos-providers.hcl), and we need to be able to access both of those here.
+ enos_provider = {
+ rhel = provider.enos.rhel
+ ubuntu = provider.enos.ubuntu
+ }
+ // We install vault packages from artifactory. If you wish to use one of these variants you'll
+ // need to configure your artifactory credentials.
+ use_artifactory = matrix.artifact == "deb" || matrix.artifact == "rpm"
+ // Zip bundles and local builds don't come with systemd units or any associated configuration.
+ // When this is true we'll let enos handle this for us.
+ manage_service = matrix.artifact == "zip" || matrix.artifact == "local"
+ // If you are using an ent edition, you will need a Vault license. Common convention
+ // is to store it at ./support/vault.hclic, but you may change this path according
+ // to your own preference.
+ vault_install_dir = matrix.artifact == "zip" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro]
+ }
+
+ // Begin scenario steps. These are the steps we'll perform to get your cluster up and running.
+ step "build_or_find_vault_artifact" {
+ description = <<-EOF
+ Depending on how we intend to get our Vault artifact, this step either builds vault from our
+ current branch or finds debian or redhat packages in Artifactory. If we're using a zip bundle
+ we'll get it from releases.hashicorp.com and skip this step entirely. Please note that if you
+ wish to use a deb or rpm artifact you'll have to configure your artifactory credentials!
+
+ Variables that are used in this step:
+
+ artifactory_host:
+ The artifactory host to search. It's very unlikely that you'll want to change this. The
+ default value is the HashiCorp Artifactory instance.
+      artifactory_repo:
+        The artifactory repository to search. It's very unlikely that you'll want to change this. The
+        default value is where CRT will publish packages.
+      artifactory_username:
+        The artifactory username associated with your token. You'll need this if you wish to use
+        deb or rpm artifacts! You can request access via Okta.
+      artifactory_token:
+        The artifactory token associated with your username. You'll need this if you wish to use
+        deb or rpm artifacts! You can create a token by logging into Artifactory via Okta.
+ vault_product_version:
+ When using the artifact:rpm or artifact:deb variants we'll use this variable to determine
+        which version of the Vault package we should fetch from Artifactory.
+ vault_artifact_path:
+ When using the artifact:local variant we'll utilize this variable to determine where
+        to create the vault.zip archive from the local branch. Defaults to /tmp/vault.zip.
+      vault_local_build_tags:
+ When using the artifact:local variant we'll use this variable to inject custom build
+ tags. If left unset we'll automatically use the build tags that correspond to the edition
+ variant.
+ EOF
+ module = matrix.artifact == "local" ? "build_local" : local.use_artifactory ? "build_artifactory_package" : "build_crt"
+
+ variables {
+ // Used for all modules
+ arch = matrix.arch
+ edition = matrix.edition
+ product_version = var.vault_product_version
+ // Required for the local build which will always result in using a local zip bundle
+ artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
+ build_ui = var.dev_build_local_ui
+ build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition]
+ goarch = matrix.arch
+ goos = "linux"
+ // Required when using a RPM or Deb package
+ // Some of these variables don't have default values so we'll only set them if they are
+ // required.
+ artifactory_host = local.use_artifactory ? var.artifactory_host : null
+ artifactory_repo = local.use_artifactory ? var.artifactory_repo : null
+ artifactory_username = local.use_artifactory ? var.artifactory_username : null
+ artifactory_token = local.use_artifactory ? var.artifactory_token : null
+ distro = matrix.distro
+ }
+ }
+
+ step "ec2_info" {
+    description = "This discovers useful metadata in EC2, like the AWS AMI IDs that we use in later modules."
+ module = module.ec2_info
+ }
+
+ step "create_vpc" {
+ description = <<-EOF
+ Create the VPC resources required for our scenario.
+
+ Variables that are used in this step:
+ tags:
+ If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+ and they'll be added to resources when possible.
+ EOF
+ module = module.create_vpc
+ depends_on = [step.ec2_info]
+
+ variables {
+ common_tags = global.tags
+ }
+ }
+
+ step "read_backend_license" {
+ description = <<-EOF
+ Read the contents of the backend license if we're using a Consul backend for either cluster
+ and the backend_edition variable is set to "ent".
+
+ Variables that are used in this step:
+ backend_edition:
+ The edition of Consul to use. If left unset it will default to CE.
+ backend_license_path:
+ If this variable is set we'll use it to determine the local path on disk that contains a
+ Consul Enterprise license. If it is not set we'll attempt to load it from
+ ./support/consul.hclic.
+ EOF
+ skip_step = (var.backend_edition == "ce" || var.backend_edition == "oss") || (matrix.primary_backend == "raft" && matrix.secondary_backend == "raft")
+ module = module.read_license
+
+ variables {
+ file_name = global.backend_license_path
+ }
+ }
+
+ step "read_vault_license" {
+ description = <<-EOF
+ Validates and reads into memory the contents of a local Vault Enterprise license if we're
+ using an Enterprise edition. This step does not run when using a community edition of Vault.
+
+ Variables that are used in this step:
+ vault_license_path:
+ If this variable is set we'll use it to determine the local path on disk that contains a
+ Vault Enterprise license. If it is not set we'll attempt to load it from
+ ./support/vault.hclic.
+ EOF
+ module = module.read_license
+
+ variables {
+ file_name = global.vault_license_path
+ }
+ }
+
+ step "create_primary_seal_key" {
+ description = <<-EOF
+ Create the necessary seal keys depending on our configured seal.
+
+ Variables that are used in this step:
+ tags:
+ If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+ and they'll be added to resources when possible.
+ EOF
+ module = "seal_${matrix.primary_seal}"
+ depends_on = [step.create_vpc]
+
+ providers = {
+ enos = provider.enos.ubuntu
+ }
+
+ variables {
+ cluster_id = step.create_vpc.id
+ cluster_meta = "primary"
+ common_tags = global.tags
+ }
+ }
+
+ step "create_secondary_seal_key" {
+ description = <<-EOF
+ Create the necessary seal keys depending on our configured seal.
+
+ Variables that are used in this step:
+ tags:
+ If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+ and they'll be added to resources when possible.
+ EOF
+ module = "seal_${matrix.secondary_seal}"
+ depends_on = [step.create_vpc]
+
+ providers = {
+ enos = provider.enos.ubuntu
+ }
+
+ variables {
+ cluster_id = step.create_vpc.id
+ cluster_meta = "secondary"
+ common_tags = global.tags
+ other_resources = step.create_primary_seal_key.resource_names
+ }
+ }
+
+ step "create_primary_cluster_targets" {
+ description = <<-EOF
+ Creates the necessary machine infrastructure targets for the Vault cluster. We also ensure
+ that the firewall is configured to allow the necessary Vault and Consul traffic and SSH
+ from the machine executing the Enos scenario.
+
+ Variables that are used in this step:
+ aws_ssh_keypair_name:
+ The AWS SSH Keypair name to use for target machines.
+ project_name:
+ The project name is used for additional tag metadata on resources.
+ tags:
+ If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+ and they'll be added to resources when possible.
+ vault_instance_count:
+ How many instances to provision for the Vault cluster. If left unset it will use a default
+ of three.
+ EOF
+ module = module.target_ec2_instances
+ depends_on = [step.create_vpc]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
+ cluster_tag_key = global.vault_tag_key
+ common_tags = global.tags
+ seal_key_names = step.create_primary_seal_key.resource_names
+ vpc_id = step.create_vpc.id
+ }
+ }
+
+ step "create_primary_cluster_backend_targets" {
+ description = <<-EOF
+ Creates the necessary machine infrastructure targets for the backend Consul storage cluster.
+ We also ensure that the firewall is configured to allow the necessary Consul traffic and SSH
+ from the machine executing the Enos scenario. When using integrated storage this step is a
+      no-op.
+
+ Variables that are used in this step:
+ tags:
+ If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+ and they'll be added to resources when possible.
+ project_name:
+ The project name is used for additional tag metadata on resources.
+ aws_ssh_keypair_name:
+ The AWS SSH Keypair name to use for target machines.
+ EOF
+ module = matrix.primary_backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim
+ depends_on = [step.create_vpc]
+
+ providers = {
+ enos = provider.enos.ubuntu
+ }
+
+ variables {
+ ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
+ cluster_tag_key = global.backend_tag_key
+ common_tags = global.tags
+ seal_key_names = step.create_primary_seal_key.resource_names
+ vpc_id = step.create_vpc.id
+ }
+ }
+
+ step "create_secondary_cluster_targets" {
+ description = <<-EOF
+ Creates the necessary machine infrastructure targets for the Vault cluster. We also ensure
+ that the firewall is configured to allow the necessary Vault and Consul traffic and SSH
+ from the machine executing the Enos scenario.
+ EOF
+ module = module.target_ec2_instances
+ depends_on = [step.create_vpc]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
+ cluster_tag_key = global.vault_tag_key
+ common_tags = global.tags
+ seal_key_names = step.create_secondary_seal_key.resource_names
+ vpc_id = step.create_vpc.id
+ }
+ }
+
+ step "create_secondary_cluster_backend_targets" {
+ description = <<-EOF
+ Creates the necessary machine infrastructure targets for the backend Consul storage cluster.
+ We also ensure that the firewall is configured to allow the necessary Consul traffic and SSH
+ from the machine executing the Enos scenario. When using integrated storage this step is a
+      no-op.
+ EOF
+
+ module = matrix.secondary_backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim
+ depends_on = [step.create_vpc]
+
+ providers = {
+ enos = provider.enos.ubuntu
+ }
+
+ variables {
+ ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
+ cluster_tag_key = global.backend_tag_key
+ common_tags = global.tags
+ seal_key_names = step.create_secondary_seal_key.resource_names
+ vpc_id = step.create_vpc.id
+ }
+ }
+
+ step "create_primary_backend_cluster" {
+ description = <<-EOF
+      Install, configure, and start the backend Consul storage cluster for the primary Vault cluster.
+ When we are using the raft storage variant this step is a no-op.
+
+ Variables that are used in this step:
+ backend_edition:
+ When configured with the backend:consul variant we'll utilize this variable to determine
+ the edition of Consul to use for the cluster. Note that if you set it to 'ent' you will
+ also need a valid license configured for the read_backend_license step. Default: ce.
+ dev_consul_version:
+ When configured with the backend:consul variant we'll utilize this variable to determine
+ the version of Consul to use for the cluster.
+ EOF
+ module = "backend_${matrix.primary_backend}"
+ depends_on = [
+ step.create_primary_cluster_backend_targets
+ ]
+
+ providers = {
+ enos = provider.enos.ubuntu
+ }
+
+ variables {
+ cluster_name = step.create_primary_cluster_backend_targets.cluster_name
+ cluster_tag_key = global.backend_tag_key
+ license = matrix.primary_backend == "consul" ? step.read_backend_license.license : null
+ release = {
+ edition = var.backend_edition
+ version = var.dev_consul_version
+ }
+ target_hosts = step.create_primary_cluster_backend_targets.hosts
+ }
+ }
+
+ step "create_primary_cluster" {
+ description = <<-EOF
+ Install, configure, start, initialize and unseal the primary Vault cluster on the specified
+ target instances.
+
+ Variables that are used in this step:
+ backend_edition:
+ When configured with the backend:consul variant we'll utilize this variable to determine
+ which version of the consul client to install on each node for Consul storage. Note that
+ if you set it to 'ent' you will also need a valid license configured for the
+ read_backend_license step. If left unset we'll use an unlicensed CE version.
+ dev_config_mode:
+        You can set this variable to instruct enos how Vault should primarily be configured when starting
+ the service. Options are 'file' and 'env' for configuration file or environment variables.
+ If left unset we'll use the default value.
+ dev_consul_version:
+ When configured with the backend:consul variant we'll utilize this variable to determine
+ which version of Consul to install. If left unset we'll utilize the default value.
+ vault_artifact_path:
+ When using the artifact:local variant this variable is utilized to specify where on
+ the local disk the vault.zip file we've built is located. It can be left unset to use
+ the default value.
+ vault_enable_audit_devices:
+ Whether or not to enable various audit devices after unsealing the Vault cluster. By default
+ we'll configure syslog, socket, and file auditing.
+ vault_product_version:
+ When using the artifact:zip variant this variable is utilized to specify the version of
+ Vault to download from releases.hashicorp.com.
+ EOF
+ module = module.vault_cluster
+ depends_on = [
+ step.create_primary_backend_cluster,
+ step.create_primary_cluster_targets,
+ step.build_or_find_vault_artifact,
+ ]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ // We set vault_artifactory_release when we want to get a .deb or .rpm package from Artifactory.
+ // We set vault_release when we want to get a .zip bundle from releases.hashicorp.com
+ // We only set one or the other, never both.
+ artifactory_release = local.use_artifactory ? step.build_or_find_vault_artifact.release : null
+ backend_cluster_name = step.create_primary_cluster_backend_targets.cluster_name
+ backend_cluster_tag_key = global.backend_tag_key
+ cluster_name = step.create_primary_cluster_targets.cluster_name
+ config_mode = var.dev_config_mode
+ consul_license = matrix.primary_backend == "consul" ? step.read_backend_license.license : null
+ consul_release = matrix.primary_backend == "consul" ? {
+ edition = var.backend_edition
+ version = var.dev_consul_version
+ } : null
+ enable_audit_devices = var.vault_enable_audit_devices
+ install_dir = local.vault_install_dir
+ license = step.read_vault_license.license
+ local_artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
+ manage_service = local.manage_service
+ packages = concat(global.packages, global.distro_packages[matrix.distro])
+ release = matrix.artifact == "zip" ? { version = var.vault_product_version, edition = matrix.edition } : null
+ seal_attributes = step.create_primary_seal_key.attributes
+ seal_type = matrix.primary_seal
+ storage_backend = matrix.primary_backend
+ target_hosts = step.create_primary_cluster_targets.hosts
+ }
+ }
+
+ step "create_secondary_backend_cluster" {
+ description = <<-EOF
+      Install, configure, and start the backend Consul storage cluster for the secondary Vault cluster.
+ When we are using the raft storage variant this step is a no-op.
+
+ Variables that are used in this step:
+ backend_edition:
+ When configured with the backend:consul variant we'll utilize this variable to determine
+ the edition of Consul to use for the cluster. Note that if you set it to 'ent' you will
+ also need a valid license configured for the read_backend_license step. Default: ce.
+ dev_consul_version:
+ When configured with the backend:consul variant we'll utilize this variable to determine
+ the version of Consul to use for the cluster.
+ EOF
+ module = "backend_${matrix.secondary_backend}"
+ depends_on = [
+ step.create_secondary_cluster_backend_targets
+ ]
+
+ providers = {
+ enos = provider.enos.ubuntu
+ }
+
+ variables {
+ cluster_name = step.create_secondary_cluster_backend_targets.cluster_name
+ cluster_tag_key = global.backend_tag_key
+ license = matrix.secondary_backend == "consul" ? step.read_backend_license.license : null
+ release = {
+ edition = var.backend_edition
+ version = var.dev_consul_version
+ }
+ target_hosts = step.create_secondary_cluster_backend_targets.hosts
+ }
+ }
+
+ step "create_secondary_cluster" {
+ description = <<-EOF
+ Install, configure, start, initialize and unseal the secondary Vault cluster on the specified
+ target instances.
+
+ Variables that are used in this step:
+ backend_edition:
+ When configured with the backend:consul variant we'll utilize this variable to determine
+ which version of the consul client to install on each node for Consul storage. Note that
+ if you set it to 'ent' you will also need a valid license configured for the
+ read_backend_license step. If left unset we'll use an unlicensed CE version.
+ dev_config_mode:
+        You can set this variable to instruct enos how Vault should primarily be configured when starting
+ the service. Options are 'file' and 'env' for configuration file or environment variables.
+ If left unset we'll use the default value.
+ dev_consul_version:
+ When configured with the backend:consul variant we'll utilize this variable to determine
+ which version of Consul to install. If left unset we'll utilize the default value.
+ vault_artifact_path:
+ When using the artifact:local variant this variable is utilized to specify where on
+ the local disk the vault.zip file we've built is located. It can be left unset to use
+ the default value.
+ vault_enable_audit_devices:
+ Whether or not to enable various audit devices after unsealing the Vault cluster. By default
+ we'll configure syslog, socket, and file auditing.
+ vault_product_version:
+ When using the artifact:zip variant this variable is utilized to specify the version of
+ Vault to download from releases.hashicorp.com.
+ EOF
+ module = module.vault_cluster
+ depends_on = [
+ step.create_secondary_backend_cluster,
+ step.create_secondary_cluster_targets
+ ]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ // We set vault_artifactory_release when we want to get a .deb or .rpm package from Artifactory.
+ // We set vault_release when we want to get a .zip bundle from releases.hashicorp.com
+ // We only set one or the other, never both.
+ artifactory_release = local.use_artifactory ? step.build_or_find_vault_artifact.release : null
+ backend_cluster_name = step.create_secondary_cluster_backend_targets.cluster_name
+ backend_cluster_tag_key = global.backend_tag_key
+ cluster_name = step.create_secondary_cluster_targets.cluster_name
+ config_mode = var.dev_config_mode
+ consul_license = matrix.secondary_backend == "consul" ? step.read_backend_license.license : null
+ consul_release = matrix.secondary_backend == "consul" ? {
+ edition = var.backend_edition
+ version = var.dev_consul_version
+ } : null
+ enable_audit_devices = var.vault_enable_audit_devices
+ install_dir = local.vault_install_dir
+ license = step.read_vault_license.license
+ local_artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
+ manage_service = local.manage_service
+ packages = concat(global.packages, global.distro_packages[matrix.distro])
+ release = matrix.artifact == "zip" ? { version = var.vault_product_version, edition = matrix.edition } : null
+ seal_attributes = step.create_secondary_seal_key.attributes
+ seal_type = matrix.secondary_seal
+ storage_backend = matrix.secondary_backend
+ target_hosts = step.create_secondary_cluster_targets.hosts
+ }
+ }
+
+ step "verify_that_vault_primary_cluster_is_unsealed" {
+ description = <<-EOF
+      Wait for the primary cluster to unseal and reach a healthy state.
+ EOF
+ module = module.vault_verify_unsealed
+ depends_on = [
+ step.create_primary_cluster
+ ]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ vault_instances = step.create_primary_cluster_targets.hosts
+ vault_install_dir = local.vault_install_dir
+ }
+ }
+
+ step "verify_that_vault_secondary_cluster_is_unsealed" {
+ description = <<-EOF
+      Wait for the secondary cluster to unseal and reach a healthy state.
+ EOF
+ module = module.vault_verify_unsealed
+ depends_on = [
+ step.create_secondary_cluster
+ ]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ vault_instances = step.create_secondary_cluster_targets.hosts
+ vault_install_dir = local.vault_install_dir
+ }
+ }
+
+ step "get_primary_cluster_ips" {
+ description = <<-EOF
+      Determine which node is the leader and which are the followers, and map their private
+      IP addresses to their public IP addresses. We'll use this information so that we can
+      enable performance replication on the leader.
+ EOF
+ module = module.vault_get_cluster_ips
+ depends_on = [step.verify_that_vault_primary_cluster_is_unsealed]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ vault_hosts = step.create_primary_cluster_targets.hosts
+ vault_install_dir = local.vault_install_dir
+ vault_root_token = step.create_primary_cluster.root_token
+ }
+ }
+
+ step "get_secondary_cluster_ips" {
+      Determine which node is the leader and which are the followers, and map their private
+      IP addresses to their public IP addresses. We'll use this information so that we can
+      enable performance replication on the leader.
+ replication on the leader.
+ EOF
+ module = module.vault_get_cluster_ips
+ depends_on = [step.verify_that_vault_secondary_cluster_is_unsealed]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ vault_hosts = step.create_secondary_cluster_targets.hosts
+ vault_install_dir = local.vault_install_dir
+ vault_root_token = step.create_secondary_cluster.root_token
+ }
+ }
+
+ step "setup_userpass_for_replication_auth" {
+ description = <<-EOF
+      Enable the userpass auth method and create a new user.
+ EOF
+ module = module.vault_verify_write_data
+ depends_on = [step.get_primary_cluster_ips]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
+ leader_private_ip = step.get_primary_cluster_ips.leader_private_ip
+ vault_instances = step.create_primary_cluster_targets.hosts
+ vault_install_dir = local.vault_install_dir
+ vault_root_token = step.create_primary_cluster.root_token
+ }
+ }
+
+ step "configure_performance_replication_primary" {
+ description = <<-EOF
+      Create a superuser policy and write it for our new user, then activate performance
+      replication on the primary.
+ EOF
+ module = module.vault_setup_perf_primary
+ depends_on = [
+ step.get_primary_cluster_ips,
+ step.get_secondary_cluster_ips,
+ step.setup_userpass_for_replication_auth,
+ ]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
+ primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip
+ vault_install_dir = local.vault_install_dir
+ vault_root_token = step.create_primary_cluster.root_token
+ }
+ }
+
+ step "generate_secondary_token" {
+ description = <<-EOF
+ Create a random token and write it to sys/replication/performance/primary/secondary-token on
+ the primary.
+ EOF
+ module = module.generate_secondary_token
+ depends_on = [step.configure_performance_replication_primary]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
+ vault_install_dir = local.vault_install_dir
+ vault_root_token = step.create_primary_cluster.root_token
+ }
+ }
+
+ step "configure_performance_replication_secondary" {
+ description = <<-EOF
+ Enable performance replication on the secondary using the new shared token.
+ EOF
+ module = module.vault_setup_perf_secondary
+ depends_on = [step.generate_secondary_token]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip
+ secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip
+ vault_install_dir = local.vault_install_dir
+ vault_root_token = step.create_secondary_cluster.root_token
+ wrapping_token = step.generate_secondary_token.secondary_token
+ }
+ }
+
+ step "unseal_secondary_followers" {
+ description = <<-EOF
+ After replication is enabled we need to unseal the followers on the secondary cluster.
+ Depending on how we're configured we'll pass the unseal keys according to this guide:
+ https://developer.hashicorp.com/vault/docs/enterprise/replication#seals
+ EOF
+ module = module.vault_unseal_nodes
+ depends_on = [
+ step.create_primary_cluster,
+ step.create_secondary_cluster,
+ step.get_secondary_cluster_ips,
+ step.configure_performance_replication_secondary
+ ]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ follower_public_ips = step.get_secondary_cluster_ips.follower_public_ips
+ vault_install_dir = local.vault_install_dir
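+      // A note on the two conditionals below: when the primary cluster uses a shamir seal we
+      // pass along its unseal keys, otherwise we pass its recovery keys and unseal using the
+      // secondary's seal type, per the replication seals guide linked above.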
+ vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : step.create_primary_cluster.recovery_keys_hex
+ vault_seal_type = matrix.primary_seal == "shamir" ? matrix.primary_seal : matrix.secondary_seal
+ }
+ }
+
+ step "verify_secondary_cluster_is_unsealed_after_enabling_replication" {
+ description = <<-EOF
+      Verify that the secondary cluster is unsealed after we enable performance replication.
+ EOF
+ module = module.vault_verify_unsealed
+ depends_on = [
+ step.unseal_secondary_followers
+ ]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ vault_instances = step.create_secondary_cluster_targets.hosts
+ vault_install_dir = local.vault_install_dir
+ }
+ }
+
+ step "verify_performance_replication" {
+ description = <<-EOF
+ Check sys/replication/performance/status and ensure that all nodes are in the correct state
+ after enabling performance replication.
+ EOF
+ module = module.vault_verify_performance_replication
+ depends_on = [step.verify_secondary_cluster_is_unsealed_after_enabling_replication]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
+ primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip
+ secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip
+ secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip
+ vault_install_dir = local.vault_install_dir
+ }
+ }
+
+ // When using a Consul backend, these output values will be for the Consul backend.
+ // When using a Raft backend, these output values will be null.
+ output "audit_device_file_path" {
+ description = "The file path for the file audit device, if enabled"
+ value = step.create_primary_cluster.audit_device_file_path
+ }
+
+ output "primary_cluster_hosts" {
+ description = "The Vault primary cluster target hosts"
+ value = step.create_primary_cluster_targets.hosts
+ }
+
+ output "primary_cluster_root_token" {
+ description = "The Vault primary cluster root token"
+ value = step.create_primary_cluster.root_token
+ }
+
+ output "primary_cluster_unseal_keys_b64" {
+ description = "The Vault primary cluster unseal keys"
+ value = step.create_primary_cluster.unseal_keys_b64
+ }
+
+ output "primary_cluster_unseal_keys_hex" {
+ description = "The Vault primary cluster unseal keys hex"
+ value = step.create_primary_cluster.unseal_keys_hex
+ }
+
+ output "primary_cluster_recovery_key_shares" {
+ description = "The Vault primary cluster recovery key shares"
+ value = step.create_primary_cluster.recovery_key_shares
+ }
+
+ output "primary_cluster_recovery_keys_b64" {
+ description = "The Vault primary cluster recovery keys b64"
+ value = step.create_primary_cluster.recovery_keys_b64
+ }
+
+ output "primary_cluster_recovery_keys_hex" {
+ description = "The Vault primary cluster recovery keys hex"
+ value = step.create_primary_cluster.recovery_keys_hex
+ }
+
+ output "secondary_cluster_hosts" {
+ description = "The Vault secondary cluster public IPs"
+ value = step.create_secondary_cluster_targets.hosts
+ }
+
+ output "secondary_cluster_root_token" {
+ description = "The Vault secondary cluster root token"
+ value = step.create_secondary_cluster.root_token
+ }
+
+ output "performance_secondary_token" {
+ description = "The performance secondary replication token"
+ value = step.generate_secondary_token.secondary_token
+ }
+}
diff --git a/enos/enos-dev-scenario-single-cluster.hcl b/enos/enos-dev-scenario-single-cluster.hcl
new file mode 100644
index 000000000000..b3052584e51c
--- /dev/null
+++ b/enos/enos-dev-scenario-single-cluster.hcl
@@ -0,0 +1,510 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+scenario "dev_single_cluster" {
+ description = <<-EOF
+ This scenario spins up a single Vault cluster with either an external Consul cluster or
+ integrated Raft for storage. None of our test verification is included in this scenario in order
+ to improve end-to-end speed. If you wish to perform such verification you'll need to use a
+ non-dev scenario instead.
+
+ The scenario supports finding and installing any released 'linux/amd64' or 'linux/arm64' Vault
+ artifact as long as its version is >= 1.8. You can also use the 'artifact:local' variant to
+ build and deploy the current branch!
+
+ In order to execute this scenario you'll need to install the enos CLI:
+ brew tap hashicorp/tap && brew update && brew install hashicorp/tap/enos
+
+ You'll also need access to an AWS account with an SSH keypair.
+    Perform the steps here to get AWS access with Doormat: https://eng-handbook.hashicorp.services/internal-tools/enos/common-setup-steps/#authenticate-with-doormat
+ Perform the steps here to get an AWS keypair set up: https://eng-handbook.hashicorp.services/internal-tools/enos/common-setup-steps/#set-your-aws-key-pair-name-and-private-key
+
+    Please note that this scenario requires several input variables to be set in order to function
+ properly. While not all variants will require all variables, it's suggested that you look over
+ the scenario outline to determine which variables affect which steps and which have inputs that
+ you should set. You can use the following command to get a textual outline of the entire
+ scenario:
+ enos scenario outline dev_single_cluster
+
+ You can also create an HTML version that is suitable for viewing in web browsers:
+ enos scenario outline dev_single_cluster --format html > index.html
+ open index.html
+
+ To configure the required variables you have a couple of choices. You can create an
+ 'enos-local.vars' file in the same 'enos' directory where this scenario is defined. In it you
+ declare your desired variable values. For example, you could copy the following content and
+ then set the values as necessary:
+
+ artifactory_username = "username@hashicorp.com"
+      artifactory_token = ""
+ aws_region = "us-west-2"
+ aws_ssh_keypair_name = ""
+ aws_ssh_keypair_key_path = "/path/to/your/private/key.pem"
+ dev_build_local_ui = false
+ dev_consul_version = "1.18.1"
+ vault_license_path = "./support/vault.hclic"
+ vault_product_version = "1.16.2"
+
+ Alternatively, you can set them in your environment:
+ export ENOS_VAR_aws_region="us-west-2"
+ export ENOS_VAR_vault_license_path="./support/vault.hclic"
+
+ After you've configured your inputs you can list and filter the available scenarios and then
+ subsequently launch and destroy them.
+ enos scenario list --help
+ enos scenario launch --help
+ enos scenario list dev_single_cluster
+ enos scenario launch dev_single_cluster arch:arm64 artifact:local backend:raft distro:ubuntu edition:ce seal:awskms
+
+    When the scenario is finished launching you can refer to the scenario outputs to see information
+ related to your cluster. You can use this information to SSH into nodes and/or to interact
+ with vault.
+ enos scenario output dev_single_cluster arch:arm64 artifact:local backend:raft distro:ubuntu edition:ce seal:awskms
+ ssh -i /path/to/your/private/key.pem
+ vault status
+
+ After you've finished you can tear down the cluster
+ enos scenario destroy dev_single_cluster arch:arm64 artifact:local backend:raft distro:ubuntu edition:ce seal:awskms
+ EOF
+
+ // The matrix is where we define all the baseline combinations that enos can utilize to customize
+  // your scenario. By default enos attempts to perform your command on the entire product! Most
+ // of the time you'll want to reduce that by passing in a filter.
+ // Run 'enos scenario list --help' to see more about how filtering scenarios works in enos.
+ matrix {
+ arch = ["amd64", "arm64"]
+ artifact = ["local", "deb", "rpm", "zip"]
+ backend = ["consul", "raft"]
+ distro = ["ubuntu", "rhel"]
+ edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
+ seal = ["awskms", "pkcs11", "shamir"]
+
+ exclude {
+ edition = ["ent.hsm", "ent.fips1402", "ent.hsm.fips1402"]
+ arch = ["arm64"]
+ }
+
+ exclude {
+ artifact = ["rpm"]
+ distro = ["ubuntu"]
+ }
+
+ exclude {
+ artifact = ["deb"]
+ distro = ["rhel"]
+ }
+
+ exclude {
+ seal = ["pkcs11"]
+ edition = ["ce", "ent", "ent.fips1402"]
+ }
+ }
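+
+  // As an illustration of how the excludes above shape the matrix: the hsm and fips1402
+  // editions are amd64-only, artifact:rpm only pairs with distro:rhel, artifact:deb only pairs
+  // with distro:ubuntu, and seal:pkcs11 requires one of the hsm editions.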
+
+ // Specify which Terraform configs and providers to use in this scenario. Most of the time you'll
+ // never need to change this! If you wanted to test with different terraform or terraform CLI
+ // settings you can define them and assign them here.
+ terraform_cli = terraform_cli.default
+ terraform = terraform.default
+
+ // Here we declare all of the providers that we might need for our scenario.
+ providers = [
+ provider.aws.default,
+ provider.enos.ubuntu,
+ provider.enos.rhel
+ ]
+
+ // These are variable values that are local to our scenario. They are evaluated after external
+ // variables and scenario matrices but before any of our steps.
+ locals {
+ // The enos provider uses different ssh transport configs for different distros (as
+ // specified in enos-providers.hcl), and we need to be able to access both of those here.
+ enos_provider = {
+ rhel = provider.enos.rhel
+ ubuntu = provider.enos.ubuntu
+ }
+    // We install Vault packages from Artifactory. If you wish to use one of these variants you'll
+    // need to configure your artifactory credentials.
+ use_artifactory = matrix.artifact == "deb" || matrix.artifact == "rpm"
+ // Zip bundles and local builds don't come with systemd units or any associated configuration.
+ // When this is true we'll let enos handle this for us.
+ manage_service = matrix.artifact == "zip" || matrix.artifact == "local"
+ // If you are using an ent edition, you will need a Vault license. Common convention
+ // is to store it at ./support/vault.hclic, but you may change this path according
+ // to your own preference.
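+    // For zip bundles we install Vault to var.vault_install_dir; otherwise we use the distro's
+    // default package installation directory.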
+ vault_install_dir = matrix.artifact == "zip" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro]
+ }
+
+ // Begin scenario steps. These are the steps we'll perform to get your cluster up and running.
+ step "build_or_find_vault_artifact" {
+ description = <<-EOF
+ Depending on how we intend to get our Vault artifact, this step either builds vault from our
+ current branch or finds debian or redhat packages in Artifactory. If we're using a zip bundle
+ we'll get it from releases.hashicorp.com and skip this step entirely. Please note that if you
+ wish to use a deb or rpm artifact you'll have to configure your artifactory credentials!
+
+ Variables that are used in this step:
+
+ artifactory_host:
+ The artifactory host to search. It's very unlikely that you'll want to change this. The
+ default value is the HashiCorp Artifactory instance.
+      artifactory_repo:
+        The artifactory repo to search. It's very unlikely that you'll want to change this. The
+        default value is where CRT will publish packages.
+      artifactory_username:
+        The artifactory username associated with your token. You'll need this if you wish to use
+        deb or rpm artifacts! You can request access via Okta.
+      artifactory_token:
+        The artifactory token associated with your username. You'll need this if you wish to use
+        deb or rpm artifacts! You can create a token by logging into Artifactory via Okta.
+ vault_product_version:
+ When using the artifact:rpm or artifact:deb variants we'll use this variable to determine
+        which version of the Vault package we should fetch from Artifactory.
+ vault_artifact_path:
+ When using the artifact:local variant we'll utilize this variable to determine where
+        to create the vault.zip archive from the local branch. Defaults to /tmp/vault.zip.
+ vault_local_build_tags:
+ When using the artifact:local variant we'll use this variable to inject custom build
+ tags. If left unset we'll automatically use the build tags that correspond to the edition
+ variant.
+ EOF
+ module = matrix.artifact == "local" ? "build_local" : local.use_artifactory ? "build_artifactory_package" : "build_crt"
+ skip_step = matrix.artifact == "zip"
+
+ variables {
+ // Used for all modules
+ arch = matrix.arch
+ edition = matrix.edition
+ product_version = var.vault_product_version
+ // Required for the local build which will always result in using a local zip bundle
+ artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
+ build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition]
+ build_ui = var.dev_build_local_ui
+ goarch = matrix.arch
+ goos = "linux"
+      // Required when using an RPM or Deb package
+ // Some of these variables don't have default values so we'll only set them if they are
+ // required.
+ artifactory_host = local.use_artifactory ? var.artifactory_host : null
+ artifactory_repo = local.use_artifactory ? var.artifactory_repo : null
+ artifactory_username = local.use_artifactory ? var.artifactory_username : null
+ artifactory_token = local.use_artifactory ? var.artifactory_token : null
+ distro = matrix.distro
+ }
+ }
+
+ step "ec2_info" {
+ description = "This discovers usefull metadata in Ec2 like AWS AMI ID's that we use in later modules."
+ module = module.ec2_info
+ }
+
+ step "create_vpc" {
+ description = <<-EOF
+ Create the VPC resources required for our scenario.
+
+ Variables that are used in this step:
+ tags:
+ If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+ and they'll be added to resources when possible.
+ EOF
+ module = module.create_vpc
+ depends_on = [step.ec2_info]
+
+ variables {
+ common_tags = global.tags
+ }
+ }
+
+ step "read_backend_license" {
+ description = <<-EOF
+ Read the contents of the backend license if we're using a Consul backend and the edition is "ent".
+
+ Variables that are used in this step:
+ backend_edition:
+ The edition of Consul to use. If left unset it will default to CE.
+ backend_license_path:
+ If this variable is set we'll use it to determine the local path on disk that contains a
+ Consul Enterprise license. If it is not set we'll attempt to load it from
+ ./support/consul.hclic.
+ EOF
+ skip_step = matrix.backend == "raft" || var.backend_edition == "oss" || var.backend_edition == "ce"
+ module = module.read_license
+
+ variables {
+ file_name = global.backend_license_path
+ }
+ }
+
+ step "read_vault_license" {
+ description = <<-EOF
+ Validates and reads into memory the contents of a local Vault Enterprise license if we're
+ using an Enterprise edition. This step does not run when using a community edition of Vault.
+
+ Variables that are used in this step:
+ vault_license_path:
+ If this variable is set we'll use it to determine the local path on disk that contains a
+ Vault Enterprise license. If it is not set we'll attempt to load it from
+ ./support/vault.hclic.
+ EOF
+ skip_step = matrix.edition == "ce"
+ module = module.read_license
+
+ variables {
+ file_name = global.vault_license_path
+ }
+ }
+
+ step "create_seal_key" {
+ description = <<-EOF
+ Create the necessary seal keys depending on our configured seal.
+
+ Variables that are used in this step:
+ tags:
+ If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+ and they'll be added to resources when possible.
+ EOF
+ module = "seal_${matrix.seal}"
+ depends_on = [step.create_vpc]
+
+ providers = {
+ enos = provider.enos.ubuntu
+ }
+
+ variables {
+ cluster_id = step.create_vpc.id
+ common_tags = global.tags
+ }
+ }
+
+ step "create_vault_cluster_targets" {
+ description = <<-EOF
+ Creates the necessary machine infrastructure targets for the Vault cluster. We also ensure
+ that the firewall is configured to allow the necessary Vault and Consul traffic and SSH
+ from the machine executing the Enos scenario.
+
+ Variables that are used in this step:
+ aws_ssh_keypair_name:
+ The AWS SSH Keypair name to use for target machines.
+ project_name:
+ The project name is used for additional tag metadata on resources.
+ tags:
+ If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+ and they'll be added to resources when possible.
+ vault_instance_count:
+ How many instances to provision for the Vault cluster. If left unset it will use a default
+ of three.
+ EOF
+ module = module.target_ec2_instances
+ depends_on = [step.create_vpc]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
+ instance_count = try(var.vault_instance_count, 3)
+ cluster_tag_key = global.vault_tag_key
+ common_tags = global.tags
+ seal_key_names = step.create_seal_key.resource_names
+ vpc_id = step.create_vpc.id
+ }
+ }
+
+ step "create_vault_cluster_backend_targets" {
+ description = <<-EOF
+ Creates the necessary machine infrastructure targets for the backend Consul storage cluster.
+ We also ensure that the firewall is configured to allow the necessary Consul traffic and SSH
+ from the machine executing the Enos scenario. When using integrated storage this step is a
+      no-op.
+
+ Variables that are used in this step:
+ tags:
+ If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+ and they'll be added to resources when possible.
+ project_name:
+ The project name is used for additional tag metadata on resources.
+ aws_ssh_keypair_name:
+ The AWS SSH Keypair name to use for target machines.
+ EOF
+
+ module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim
+ depends_on = [step.create_vpc]
+
+ providers = {
+ enos = provider.enos.ubuntu
+ }
+
+ variables {
+ ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
+ cluster_tag_key = global.backend_tag_key
+ common_tags = global.tags
+ seal_key_names = step.create_seal_key.resource_names
+ vpc_id = step.create_vpc.id
+ }
+ }
+
+ step "create_backend_cluster" {
+ description = <<-EOF
+ Install, configure, and start the backend Consul storage cluster. When we are using the raft
+ storage variant this step is a no-op.
+
+ Variables that are used in this step:
+ backend_edition:
+ When configured with the backend:consul variant we'll utilize this variable to determine
+ the edition of Consul to use for the cluster. Note that if you set it to 'ent' you will
+ also need a valid license configured for the read_backend_license step. Default: ce.
+ dev_consul_version:
+ When configured with the backend:consul variant we'll utilize this variable to determine
+ the version of Consul to use for the cluster.
+ EOF
+ module = "backend_${matrix.backend}"
+ depends_on = [
+ step.create_vault_cluster_backend_targets
+ ]
+
+ providers = {
+ enos = provider.enos.ubuntu
+ }
+
+ variables {
+ cluster_name = step.create_vault_cluster_backend_targets.cluster_name
+ cluster_tag_key = global.backend_tag_key
+ license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null
+ release = {
+ edition = var.backend_edition
+ version = var.dev_consul_version
+ }
+ target_hosts = step.create_vault_cluster_backend_targets.hosts
+ }
+ }
+
+ step "create_vault_cluster" {
+ description = <<-EOF
+ Install, configure, start, initialize and unseal the Vault cluster on the specified target
+ instances.
+
+ Variables that are used in this step:
+ backend_edition:
+ When configured with the backend:consul variant we'll utilize this variable to determine
+ which version of the consul client to install on each node for Consul storage. Note that
+ if you set it to 'ent' you will also need a valid license configured for the
+ read_backend_license step. If left unset we'll use an unlicensed CE version.
+ dev_config_mode:
+        You can set this variable to instruct enos on how Vault should primarily be configured
+        when starting the service. Options are 'file' and 'env', i.e. a configuration file or
+        environment variables. If left unset we'll use the default value.
+ dev_consul_version:
+ When configured with the backend:consul variant we'll utilize this variable to determine
+ which version of Consul to install. If left unset we'll utilize the default value.
+ vault_artifact_path:
+ When using the artifact:local variant this variable is utilized to specify where on
+ the local disk the vault.zip file we've built is located. It can be left unset to use
+ the default value.
+ vault_enable_audit_devices:
+ Whether or not to enable various audit devices after unsealing the Vault cluster. By default
+ we'll configure syslog, socket, and file auditing.
+ vault_product_version:
+ When using the artifact:zip variant this variable is utilized to specify the version of
+ Vault to download from releases.hashicorp.com.
+ EOF
+ module = module.vault_cluster
+ depends_on = [
+ step.create_backend_cluster,
+ step.create_vault_cluster_targets,
+ step.build_or_find_vault_artifact,
+ ]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+      // We set artifactory_release when we want to get a .deb or .rpm package from Artifactory.
+      // We set release when we want to get a .zip bundle from releases.hashicorp.com.
+      // We only set one or the other, never both.
+ artifactory_release = local.use_artifactory ? step.build_or_find_vault_artifact.release : null
+ backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name
+ backend_cluster_tag_key = global.backend_tag_key
+ cluster_name = step.create_vault_cluster_targets.cluster_name
+ config_mode = var.dev_config_mode
+ consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null
+ consul_release = matrix.backend == "consul" ? {
+ edition = var.backend_edition
+ version = var.dev_consul_version
+ } : null
+ enable_audit_devices = var.vault_enable_audit_devices
+ install_dir = local.vault_install_dir
+ license = matrix.edition != "ce" ? step.read_vault_license.license : null
+ local_artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
+ manage_service = local.manage_service
+ packages = concat(global.packages, global.distro_packages[matrix.distro])
+ release = matrix.artifact == "zip" ? { version = var.vault_product_version, edition = matrix.edition } : null
+ seal_attributes = step.create_seal_key.attributes
+ seal_type = matrix.seal
+ storage_backend = matrix.backend
+ target_hosts = step.create_vault_cluster_targets.hosts
+ }
+ }
+
+ // When using a Consul backend, these output values will be for the Consul backend.
+ // When using a Raft backend, these output values will be null.
+ output "audit_device_file_path" {
+ description = "The file path for the file audit device, if enabled"
+ value = step.create_vault_cluster.audit_device_file_path
+ }
+
+ output "cluster_name" {
+ description = "The Vault cluster name"
+ value = step.create_vault_cluster.cluster_name
+ }
+
+ output "hosts" {
+ description = "The Vault cluster target hosts"
+ value = step.create_vault_cluster.target_hosts
+ }
+
+ output "private_ips" {
+ description = "The Vault cluster private IPs"
+ value = step.create_vault_cluster.private_ips
+ }
+
+ output "public_ips" {
+ description = "The Vault cluster public IPs"
+ value = step.create_vault_cluster.public_ips
+ }
+
+ output "root_token" {
+ description = "The Vault cluster root token"
+ value = step.create_vault_cluster.root_token
+ }
+
+ output "recovery_key_shares" {
+ description = "The Vault cluster recovery key shares"
+ value = step.create_vault_cluster.recovery_key_shares
+ }
+
+ output "recovery_keys_b64" {
+ description = "The Vault cluster recovery keys b64"
+ value = step.create_vault_cluster.recovery_keys_b64
+ }
+
+ output "recovery_keys_hex" {
+ description = "The Vault cluster recovery keys hex"
+ value = step.create_vault_cluster.recovery_keys_hex
+ }
+
+ output "seal_key_attributes" {
+ description = "The Vault cluster seal attributes"
+ value = step.create_seal_key.attributes
+ }
+
+ output "unseal_keys_b64" {
+ description = "The Vault cluster unseal keys"
+ value = step.create_vault_cluster.unseal_keys_b64
+ }
+
+ output "unseal_keys_hex" {
+ description = "The Vault cluster unseal keys hex"
+ value = step.create_vault_cluster.unseal_keys_hex
+ }
+}
diff --git a/enos/enos-dev-variables.hcl b/enos/enos-dev-variables.hcl
new file mode 100644
index 000000000000..1184748f049a
--- /dev/null
+++ b/enos/enos-dev-variables.hcl
@@ -0,0 +1,21 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+variable "dev_build_local_ui" {
+ type = bool
+ description = "Whether or not to build the web UI when using the local builder var. If the assets have already been built we'll still include them"
+ default = false
+}
+
+variable "dev_config_mode" {
+ type = string
+ description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options."
+ default = "file" // or "env"
+}
+
+variable "dev_consul_version" {
+ type = string
+ description = "The version of Consul to use when using Consul for storage!"
+ default = "1.18.1"
+ // NOTE: You can also set the "backend_edition" if you want to use Consul Enterprise
+}
diff --git a/enos/enos-globals.hcl b/enos/enos-globals.hcl
index de38a812ffaf..5ca6dd86f8f6 100644
--- a/enos/enos-globals.hcl
+++ b/enos/enos-globals.hcl
@@ -2,11 +2,12 @@
# SPDX-License-Identifier: BUSL-1.1
globals {
- archs = ["amd64", "arm64"]
- artifact_sources = ["local", "crt", "artifactory"]
- artifact_types = ["bundle", "package"]
- backends = ["consul", "raft"]
- backend_tag_key = "VaultStorage"
+ archs = ["amd64", "arm64"]
+ artifact_sources = ["local", "crt", "artifactory"]
+ artifact_types = ["bundle", "package"]
+ backends = ["consul", "raft"]
+ backend_license_path = abspath(var.backend_license_path != null ? var.backend_license_path : joinpath(path.root, "./support/consul.hclic"))
+ backend_tag_key = "VaultStorage"
build_tags = {
"ce" = ["ui"]
"ent" = ["ui", "enterprise", "ent"]
diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl
index 055ddbf1a39d..396d54d90981 100644
--- a/enos/enos-modules.hcl
+++ b/enos/enos-modules.hcl
@@ -16,18 +16,31 @@ module "backend_raft" {
source = "./modules/backend_raft"
}
+// Find any artifact in Artifactory. Requires the version, revision, and edition.
+module "build_artifactory" {
+ source = "./modules/build_artifactory_artifact"
+}
+
+// Find any released RPM or Deb in Artifactory. Requires the version, edition, distro, and distro
+// version.
+module "build_artifactory_package" {
+ source = "./modules/build_artifactory_package"
+}
+
+// A shim "build module" suitable for use when using locally pre-built artifacts or a zip bundle
+// from releases.hashicorp.com. When using a local pre-built artifact it requires the local
+// artifact path. When using a release zip it does nothing as you'll need to configure the
+// vault_cluster module with release info instead.
module "build_crt" {
source = "./modules/build_crt"
}
+// Build the local branch and package it into a zip artifact. Requires the goarch, goos, build tags,
+// and bundle path.
module "build_local" {
source = "./modules/build_local"
}
-module "build_artifactory" {
- source = "./modules/vault_artifactory_artifact"
-}
-
module "create_vpc" {
source = "./modules/create_vpc"
diff --git a/enos/enos-terraform.hcl b/enos/enos-terraform.hcl
index c435b154807c..9320f54a57db 100644
--- a/enos/enos-terraform.hcl
+++ b/enos/enos-terraform.hcl
@@ -4,10 +4,6 @@
terraform_cli "default" {
plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null
- credentials "app.terraform.io" {
- token = var.tfc_api_token
- }
-
/*
provider_installation {
dev_overrides = {
diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl
index 12a9c961f3bd..ff5aeec7cb3c 100644
--- a/enos/enos-variables.hcl
+++ b/enos/enos-variables.hcl
@@ -93,12 +93,6 @@ variable "terraform_plugin_cache_dir" {
default = null
}
-variable "tfc_api_token" {
- description = "The Terraform Cloud QTI Organization API token. This is used to download the enos Terraform provider."
- type = string
- sensitive = true
-}
-
variable "ubuntu_distro_version" {
description = "The version of ubuntu to use"
type = string
diff --git a/enos/enos.vars.hcl b/enos/enos.vars.hcl
index 96e2b3612f7d..8397eda372c0 100644
--- a/enos/enos.vars.hcl
+++ b/enos/enos.vars.hcl
@@ -51,10 +51,6 @@
# It must exist.
# terraform_plugin_cache_dir = "/Users//.terraform/plugin-cache-dir
-# tfc_api_token is the Terraform Cloud QTI Organization API token. We need this
-# to download the enos Terraform provider and the enos Terraform modules.
-# tfc_api_token = "XXXXX.atlasv1.XXXXX..."
-
# ui_test_filter is the test filter to limit the ui tests to execute for the ui scenario. It will
# be appended to the ember test command as '-f=\"\"'.
# ui_test_filter = "sometest"
diff --git a/enos/k8s/enos-terraform-k8s.hcl b/enos/k8s/enos-terraform-k8s.hcl
index 5844a6b4f619..9b884ef12109 100644
--- a/enos/k8s/enos-terraform-k8s.hcl
+++ b/enos/k8s/enos-terraform-k8s.hcl
@@ -17,8 +17,4 @@ terraform "k8s" {
terraform_cli "default" {
plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null
-
- credentials "app.terraform.io" {
- token = var.tfc_api_token
- }
}
diff --git a/enos/k8s/enos-variables-k8s.hcl b/enos/k8s/enos-variables-k8s.hcl
index e962ff7e2e7f..52ffd8d8225c 100644
--- a/enos/k8s/enos-variables-k8s.hcl
+++ b/enos/k8s/enos-variables-k8s.hcl
@@ -43,11 +43,6 @@ variable "terraform_plugin_cache_dir" {
default = null
}
-variable "tfc_api_token" {
- description = "The Terraform Cloud QTI Organization API token."
- type = string
-}
-
variable "vault_build_date" {
description = "The build date for the vault docker image"
type = string
diff --git a/enos/modules/vault_artifactory_artifact/locals.tf b/enos/modules/build_artifactory_artifact/locals.tf
similarity index 100%
rename from enos/modules/vault_artifactory_artifact/locals.tf
rename to enos/modules/build_artifactory_artifact/locals.tf
diff --git a/enos/modules/vault_artifactory_artifact/main.tf b/enos/modules/build_artifactory_artifact/main.tf
similarity index 100%
rename from enos/modules/vault_artifactory_artifact/main.tf
rename to enos/modules/build_artifactory_artifact/main.tf
diff --git a/enos/modules/vault_artifactory_artifact/outputs.tf b/enos/modules/build_artifactory_artifact/outputs.tf
similarity index 100%
rename from enos/modules/vault_artifactory_artifact/outputs.tf
rename to enos/modules/build_artifactory_artifact/outputs.tf
diff --git a/enos/modules/vault_artifactory_artifact/variables.tf b/enos/modules/build_artifactory_artifact/variables.tf
similarity index 100%
rename from enos/modules/vault_artifactory_artifact/variables.tf
rename to enos/modules/build_artifactory_artifact/variables.tf
diff --git a/enos/modules/build_artifactory_package/main.tf b/enos/modules/build_artifactory_package/main.tf
new file mode 100644
index 000000000000..1e7d0826d22f
--- /dev/null
+++ b/enos/modules/build_artifactory_package/main.tf
@@ -0,0 +1,160 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+terraform {
+ required_providers {
+ enos = {
+ source = "registry.terraform.io/hashicorp-forge/enos"
+ }
+ }
+}
+
+variable "arch" {
+ type = string
+ description = "The architecture for the desired artifact"
+}
+
+variable "artifactory_username" {
+ type = string
+ description = "The username to use when connecting to Artifactory"
+}
+
+variable "artifactory_token" {
+ type = string
+ description = "The token to use when connecting to Artifactory"
+ sensitive = true
+}
+
+variable "artifactory_host" {
+ type = string
+ description = "The Artifactory host to search for Vault artifacts"
+ default = "https://artifactory.hashicorp.engineering/artifactory"
+}
+
+variable "distro" {
+ type = string
+ description = "The distro for the desired artifact (ubuntu or rhel)"
+}
+
+variable "distro_version" {
+ type = string
+ description = "The RHEL version for .rpm packages"
+ default = "9"
+}
+
+variable "edition" {
+ type = string
+ description = "The edition of Vault to use"
+}
+
+variable "product_version" {
+ type = string
+ description = "The version of Vault to use"
+}
+
+// Shim variables that we don't use but include to satisfy the build module "interface"
+variable "artifact_path" { default = null }
+variable "artifact_type" { default = null }
+variable "artifactory_repo" { default = null }
+variable "build_tags" { default = null }
+variable "build_ui" { default = null }
+variable "bundle_path" { default = null }
+variable "goarch" { default = null }
+variable "goos" { default = null }
+variable "revision" { default = null }
+
+locals {
+ // File name prefixes for the various distributions and editions
+ artifact_prefix = {
+ ubuntu = {
+ "ce" = "vault_"
+ "ent" = "vault-enterprise_",
+ "ent.hsm" = "vault-enterprise-hsm_",
+ "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402_",
+ "oss" = "vault_"
+ },
+ rhel = {
+ "ce" = "vault-"
+ "ent" = "vault-enterprise-",
+ "ent.hsm" = "vault-enterprise-hsm-",
+ "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402-",
+ "oss" = "vault-"
+ }
+ }
+
+ // Format the version and edition to use in the artifact name
+ artifact_version = {
+ "ce" = "${var.product_version}"
+ "ent" = "${var.product_version}+ent"
+ "ent.hsm" = "${var.product_version}+ent"
+ "ent.hsm.fips1402" = "${var.product_version}+ent"
+ "oss" = "${var.product_version}"
+ }
+
+ // File name extensions for the various architectures and distributions
+ artifact_extension = {
+ amd64 = {
+ ubuntu = "-1_amd64.deb"
+ rhel = "-1.x86_64.rpm"
+ }
+ arm64 = {
+ ubuntu = "-1_arm64.deb"
+ rhel = "-1.aarch64.rpm"
+ }
+ }
+
+ // Use the above variables to construct the artifact name to look up in Artifactory.
+ // Will look something like:
+ // vault_1.12.2-1_arm64.deb
+ // vault-enterprise_1.12.2+ent-1_amd64.deb
+ // vault-enterprise-hsm-1.12.2+ent-1.x86_64.rpm
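+  // For instance, distro=rhel, edition=ent, arch=arm64, and product_version=1.16.2 resolve to
+  // "vault-enterprise-" + "1.16.2+ent" + "-1.aarch64.rpm" => vault-enterprise-1.16.2+ent-1.aarch64.rpm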
+ artifact_name = "${local.artifact_prefix[var.distro][var.edition]}${local.artifact_version[var.edition]}${local.artifact_extension[var.arch][var.distro]}"
+
+ // The path within the Artifactory repo that corresponds to the appropriate architecture
+ artifactory_repo_path_dir = {
+ "amd64" = "x86_64"
+ "arm64" = "aarch64"
+ }
+}
+
+data "enos_artifactory_item" "vault_package" {
+ username = var.artifactory_username
+ token = var.artifactory_token
+ name = local.artifact_name
+ host = var.artifactory_host
+ repo = var.distro == "rhel" ? "hashicorp-rpm-release-local*" : "hashicorp-apt-release-local*"
+ path = var.distro == "rhel" ? "RHEL/${var.distro_version}/${local.artifactory_repo_path_dir[var.arch]}/stable" : "pool/${var.arch}/main"
+}
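+
+// As a concrete example of the repo and path lookups above: distro=rhel, distro_version=9, and
+// arch=arm64 search repo "hashicorp-rpm-release-local*" at path "RHEL/9/aarch64/stable", while
+// distro=ubuntu and arch=amd64 search "hashicorp-apt-release-local*" at "pool/amd64/main".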
+
+output "results" {
+ value = data.enos_artifactory_item.vault_package.results
+}
+
+output "url" {
+ value = data.enos_artifactory_item.vault_package.results[0].url
+ description = "The artifactory download url for the artifact"
+}
+
+output "sha256" {
+ value = data.enos_artifactory_item.vault_package.results[0].sha256
+ description = "The sha256 checksum for the artifact"
+}
+
+output "size" {
+ value = data.enos_artifactory_item.vault_package.results[0].size
+ description = "The size in bytes of the artifact"
+}
+
+output "name" {
+ value = data.enos_artifactory_item.vault_package.results[0].name
+ description = "The name of the artifact"
+}
+
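+// This map is the shape the dev scenarios pass to the vault_cluster module's artifactory_release
+// input (via step.build_or_find_vault_artifact.release), so it can be wired through directly.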
+output "release" {
+ value = {
+ url = data.enos_artifactory_item.vault_package.results[0].url
+ sha256 = data.enos_artifactory_item.vault_package.results[0].sha256
+ username = var.artifactory_username
+ token = var.artifactory_token
+ }
+}
diff --git a/enos/modules/build_crt/main.tf b/enos/modules/build_crt/main.tf
index 6700cbf78fef..d113c9cbe05e 100644
--- a/enos/modules/build_crt/main.tf
+++ b/enos/modules/build_crt/main.tf
@@ -26,24 +26,12 @@ variable "artifactory_host" { default = null }
variable "artifactory_repo" { default = null }
variable "artifactory_username" { default = null }
variable "artifactory_token" { default = null }
-variable "arch" {
- default = null
-}
-variable "artifact_path" {
- default = null
-}
-variable "artifact_type" {
- default = null
-}
-variable "distro" {
- default = null
-}
-variable "edition" {
- default = null
-}
-variable "revision" {
- default = null
-}
-variable "product_version" {
- default = null
-}
+variable "arch" { default = null }
+variable "artifact_path" { default = null }
+variable "artifact_type" { default = null }
+variable "build_ui" { default = null }
+variable "distro" { default = null }
+variable "distro_version" { default = null }
+variable "edition" { default = null }
+variable "revision" { default = null }
+variable "product_version" { default = null }
diff --git a/enos/modules/build_local/main.tf b/enos/modules/build_local/main.tf
index 555376309463..1ad1338bff91 100644
--- a/enos/modules/build_local/main.tf
+++ b/enos/modules/build_local/main.tf
@@ -9,11 +9,21 @@ terraform {
}
}
+variable "artifact_path" {
+ description = "Where to create the zip bundle of the Vault build"
+}
+
variable "build_tags" {
type = list(string)
description = "The build tags to pass to the Go compiler"
}
+variable "build_ui" {
+ type = bool
+ description = "Whether or not we should build the UI when creating the local build"
+ default = true
+}
+
variable "goarch" {
type = string
description = "The Go architecture target"
@@ -31,12 +41,9 @@ variable "artifactory_repo" { default = null }
variable "artifactory_username" { default = null }
variable "artifactory_token" { default = null }
variable "arch" { default = null }
-variable "artifact_path" {
- type = string
- default = "/tmp/vault.zip"
-}
variable "artifact_type" { default = null }
variable "distro" { default = null }
+variable "distro_version" { default = null }
variable "edition" { default = null }
variable "revision" { default = null }
variable "product_version" { default = null }
@@ -50,8 +57,9 @@ resource "enos_local_exec" "build" {
environment = {
BASE_VERSION = module.local_metadata.version_base
- BIN_PATH = "dist"
- BUNDLE_PATH = var.artifact_path,
+ BIN_PATH = abspath("${path.module}/../../../dist")
+ BUILD_UI = tostring(var.build_ui)
+ BUNDLE_PATH = abspath(var.artifact_path)
GO_TAGS = join(" ", var.build_tags)
GOARCH = var.goarch
GOOS = var.goos
diff --git a/enos/modules/build_local/scripts/build.sh b/enos/modules/build_local/scripts/build.sh
index ad191807a876..06fc03f39462 100755
--- a/enos/modules/build_local/scripts/build.sh
+++ b/enos/modules/build_local/scripts/build.sh
@@ -11,11 +11,14 @@ export CGO_ENABLED=0
root_dir="$(git rev-parse --show-toplevel)"
pushd "$root_dir" > /dev/null
-make ci-build-ui ci-build
-: "${BIN_PATH:="dist"}"
-: "${BUNDLE_PATH:=$(git rev-parse --show-toplevel)/vault.zip}"
-echo "--> Bundling $BIN_PATH/* to $BUNDLE_PATH"
-zip -r -j "$BUNDLE_PATH" "$BIN_PATH/"
+if [ -n "$BUILD_UI" ] && [ "$BUILD_UI" = "true" ]; then
+ make ci-build-ui
+fi
+
+make ci-build
popd > /dev/null
+
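+# BIN_PATH and BUNDLE_PATH are exported by the enos_local_exec environment in
+# enos/modules/build_local/main.tf, so the script no longer needs to default them here.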
+echo "--> Bundling $BIN_PATH/* to $BUNDLE_PATH"
+zip -r -j "$BUNDLE_PATH" "$BIN_PATH/"
diff --git a/enos/modules/vault_cluster/main.tf b/enos/modules/vault_cluster/main.tf
index 8933e1463f12..e0ee864b91c8 100644
--- a/enos/modules/vault_cluster/main.tf
+++ b/enos/modules/vault_cluster/main.tf
@@ -239,6 +239,30 @@ resource "enos_vault_unseal" "maybe_force_unseal" {
}
}
+# Add the vault install location to the PATH and set up VAULT_ADDR and VAULT_TOKEN environment
+# variables in the login shell so we don't have to do it if/when we log in to a cluster node.
+resource "enos_remote_exec" "configure_login_shell_profile" {
+ depends_on = [
+ enos_vault_init.leader,
+ enos_vault_unseal.leader,
+ ]
+ for_each = var.target_hosts
+
+ environment = {
+ VAULT_ADDR = "http://127.0.0.1:8200"
+ VAULT_TOKEN = var.root_token != null ? var.root_token : try(enos_vault_init.leader[0].root_token, "_")
+ VAULT_INSTALL_DIR = var.install_dir
+ }
+
+ scripts = [abspath("${path.module}/scripts/set-up-login-shell-profile.sh")]
+
+ transport = {
+ ssh = {
+ host = each.value.public_ip
+ }
+ }
+}
+
# We need to ensure that the directory used for audit logs is present and accessible to the vault
# user on all nodes, since logging will only happen on the leader.
resource "enos_remote_exec" "create_audit_log_dir" {
diff --git a/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh b/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh
new file mode 100644
index 000000000000..f3a42d22a59b
--- /dev/null
+++ b/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+
+fail() {
+ echo "$1" 1>&2
+ exit 1
+}
+
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+# Determine the profile file we should write to. We only want to affect login shells, and bash
+# will only read one of these, in order of precedence.
+determineProfileFile() {
+ if [ -f "$HOME/.bash_profile" ]; then
+ printf "%s/.bash_profile\n" "$HOME"
+ return 0
+ fi
+
+ if [ -f "$HOME/.bash_login" ]; then
+ printf "%s/.bash_login\n" "$HOME"
+ return 0
+ fi
+
+ printf "%s/.profile\n" "$HOME"
+}
+
+appendVaultProfileInformation() {
+ tee -a "$1" <<< "export PATH=$PATH:$VAULT_INSTALL_DIR
+export VAULT_ADDR=$VAULT_ADDR
+export VAULT_TOKEN=$VAULT_TOKEN"
+}
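+
+# As a sketch, after this runs the chosen profile file contains lines like the following (the
+# install dir and token values here are hypothetical):
+#   export PATH=$PATH:/opt/vault/bin
+#   export VAULT_ADDR=http://127.0.0.1:8200
+#   export VAULT_TOKEN=hvs.example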
+
+main() {
+ local profile_file
+ if ! profile_file=$(determineProfileFile); then
+ fail "failed to determine login shell profile file location"
+ fi
+
+  # If vault_cluster is used more than once, e.g. for autopilot or replication, this module can
+  # be called more than once. Short circuit here if our profile is already set up.
+ if grep VAULT_ADDR < "$profile_file"; then
+ exit 0
+ fi
+
+ if ! appendVaultProfileInformation "$profile_file"; then
+ fail "failed to write vault configuration to login shell profile"
+ fi
+
+ exit 0
+}
+
+main
diff --git a/go.mod b/go.mod
index 700bf61491c3..4868f38bc537 100644
--- a/go.mod
+++ b/go.mod
@@ -53,7 +53,7 @@ require (
github.com/cockroachdb/cockroach-go/v2 v2.3.8
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/denisenkom/go-mssqldb v0.12.3
- github.com/docker/docker v25.0.2+incompatible
+ github.com/docker/docker v25.0.5+incompatible
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74
github.com/dustin/go-humanize v1.0.1
github.com/fatih/color v1.16.0
@@ -136,7 +136,7 @@ require (
github.com/hashicorp/vault-plugin-auth-centrify v0.15.1
github.com/hashicorp/vault-plugin-auth-cf v0.16.0
github.com/hashicorp/vault-plugin-auth-gcp v0.16.2
- github.com/hashicorp/vault-plugin-auth-jwt v0.20.2
+ github.com/hashicorp/vault-plugin-auth-jwt v0.20.3
github.com/hashicorp/vault-plugin-auth-kerberos v0.11.0
github.com/hashicorp/vault-plugin-auth-kubernetes v0.18.0
github.com/hashicorp/vault-plugin-auth-oci v0.15.1
@@ -149,7 +149,7 @@ require (
github.com/hashicorp/vault-plugin-mock v0.16.1
github.com/hashicorp/vault-plugin-secrets-ad v0.17.0
github.com/hashicorp/vault-plugin-secrets-alicloud v0.16.0
- github.com/hashicorp/vault-plugin-secrets-azure v0.17.1
+ github.com/hashicorp/vault-plugin-secrets-azure v0.17.2
github.com/hashicorp/vault-plugin-secrets-gcp v0.18.0
github.com/hashicorp/vault-plugin-secrets-gcpkms v0.16.0
github.com/hashicorp/vault-plugin-secrets-kubernetes v0.7.0
@@ -174,7 +174,6 @@ require (
github.com/kr/text v0.2.0
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20
- github.com/mholt/archiver/v3 v3.5.1
github.com/michaelklishin/rabbit-hole/v2 v2.12.0
github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a
github.com/mitchellh/copystructure v1.2.0
@@ -216,18 +215,18 @@ require (
go.opentelemetry.io/otel/trace v1.24.0
go.uber.org/atomic v1.11.0
go.uber.org/goleak v1.2.1
- golang.org/x/crypto v0.22.0
+ golang.org/x/crypto v0.23.0
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
- golang.org/x/net v0.24.0
+ golang.org/x/net v0.25.0
golang.org/x/oauth2 v0.18.0
golang.org/x/sync v0.7.0
- golang.org/x/sys v0.19.0
- golang.org/x/term v0.19.0
- golang.org/x/text v0.14.0
+ golang.org/x/sys v0.20.0
+ golang.org/x/term v0.20.0
+ golang.org/x/text v0.15.0
golang.org/x/tools v0.18.0
google.golang.org/api v0.163.0
google.golang.org/grpc v1.61.1
- google.golang.org/protobuf v1.33.0
+ google.golang.org/protobuf v1.34.1
gopkg.in/ory-am/dockertest.v3 v3.3.4
k8s.io/apimachinery v0.29.1
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
@@ -276,7 +275,6 @@ require (
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/agext/levenshtein v1.2.1 // indirect
- github.com/andybalholm/brotli v1.0.5 // indirect
github.com/apache/arrow/go/v14 v14.0.2 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/aws/aws-sdk-go-v2 v1.23.4 // indirect
@@ -333,11 +331,10 @@ require (
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect
github.com/digitalocean/godo v1.7.5 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
- github.com/distribution/reference v0.5.0 // indirect
+ github.com/distribution/reference v0.6.0 // indirect
github.com/docker/cli v25.0.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
- github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/dvsekhvalnov/jose2go v1.6.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
@@ -427,7 +424,6 @@ require (
github.com/kelseyhightower/envconfig v1.4.0 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
- github.com/klauspost/pgzip v1.2.5 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/linode/linodego v0.7.1 // indirect
@@ -460,11 +456,10 @@ require (
github.com/mtibben/percent v0.2.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 // indirect
- github.com/nwaples/rardecode v1.1.2 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
- github.com/opencontainers/runc v1.1.12 // indirect
+ github.com/opencontainers/runc v1.2.0-rc.1 // indirect
github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect
github.com/oracle/oci-go-sdk/v59 v59.0.0 // indirect
github.com/oracle/oci-go-sdk/v60 v60.0.0 // indirect
@@ -499,7 +494,6 @@ require (
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c // indirect
- github.com/ulikunitz/xz v0.5.10 // indirect
github.com/vmware/govmomi v0.18.0 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
@@ -508,7 +502,6 @@ require (
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
- github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
@@ -528,7 +521,6 @@ require (
google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe // indirect
- gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.66.2 // indirect
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
diff --git a/go.sum b/go.sum
index 7b43be6e4ff6..d0a70fd14217 100644
--- a/go.sum
+++ b/go.sum
@@ -1366,10 +1366,7 @@ github.com/aliyun/alibaba-cloud-sdk-go v1.62.676/go.mod h1:CJJYa1ZMxjlN/NbXEwmej
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 h1:nWDRPCyCltiTsANwC/n3QZH7Vww33Npq9MKqlwRzI/c=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
-github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
-github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
@@ -1791,8 +1788,8 @@ github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQ
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4=
-github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
-github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
@@ -1815,8 +1812,8 @@ github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bc
github.com/docker/docker v23.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v25.0.2+incompatible h1:/OaKeauroa10K4Nqavw4zlhcDq/WBcPMc5DbjOGgozY=
-github.com/docker/docker v25.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE=
+github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
@@ -1832,9 +1829,6 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY=
-github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
-github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M=
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -2160,7 +2154,6 @@ github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -2536,8 +2529,8 @@ github.com/hashicorp/vault-plugin-auth-cf v0.16.0 h1:t4+0LY6002NQvY6c0c43ikZjxqR
github.com/hashicorp/vault-plugin-auth-cf v0.16.0/go.mod h1:q+Lt3FhtFlP+pulKSjrbnR8ecu4vY9TlgPvs+nnBey8=
github.com/hashicorp/vault-plugin-auth-gcp v0.16.2 h1:HC1PpXxGNzfu7IUfN7Ok7dIMV29R8a/2EJ5uDnrpxz0=
github.com/hashicorp/vault-plugin-auth-gcp v0.16.2/go.mod h1:8FWNvFElzQBWJGCZ3SBPqsSc/x9bge9Et+JuwVLlJPM=
-github.com/hashicorp/vault-plugin-auth-jwt v0.20.2 h1:UAfUPSL7OgL9wdS7rrzxFVtbVcPmNgip+g4nYq2iQO4=
-github.com/hashicorp/vault-plugin-auth-jwt v0.20.2/go.mod h1:mOngmkea6zPgfo3xX6sQnb12HbEJpl9cuyK+Y3ePB0c=
+github.com/hashicorp/vault-plugin-auth-jwt v0.20.3 h1:mLsdorH4m43rBqybHDZKl33rrmc80ens4hSB6E7i9o0=
+github.com/hashicorp/vault-plugin-auth-jwt v0.20.3/go.mod h1:1IQjNAZ2z8GdTPM/XizC6eA4X9brnOXiwSoYEOfuDlM=
github.com/hashicorp/vault-plugin-auth-kerberos v0.11.0 h1:XjdH8nqosqgKeAwBptMS7DoXsdi8IKL2fbBSyvL/HRM=
github.com/hashicorp/vault-plugin-auth-kerberos v0.11.0/go.mod h1:xupzh9O6Us6bqKLZ6wfRsjqlf1Mb1TRylKpxZPJd5rA=
github.com/hashicorp/vault-plugin-auth-kubernetes v0.18.0 h1:mGVVdcTI55t/NrMefkLjnenAVunJiQZg5o0opuU7ixw=
@@ -2562,8 +2555,8 @@ github.com/hashicorp/vault-plugin-secrets-ad v0.17.0 h1:yXyjHkFduORBwI6g9GxIorXX
github.com/hashicorp/vault-plugin-secrets-ad v0.17.0/go.mod h1:HXT1QFK8wN+HYhWWPAIVYSXnNuBqUDM2TsRgiJT6qUc=
github.com/hashicorp/vault-plugin-secrets-alicloud v0.16.0 h1:rkMe/n9/VylQEm7QeNXgdUaESvLz5UjkokMH1WkFiKU=
github.com/hashicorp/vault-plugin-secrets-alicloud v0.16.0/go.mod h1:xkGzU7LrkgoRhdN2NwLsshqCpjPz2aqkMVzqS6JKJeg=
-github.com/hashicorp/vault-plugin-secrets-azure v0.17.1 h1:A2EuyhwaCENCXsAZXWWQ3r/oNKwGlQydkZi2eD7RyhM=
-github.com/hashicorp/vault-plugin-secrets-azure v0.17.1/go.mod h1:R4SSIIC5/NPpeV7GO1ZQ9z0cLUNufAAVi+oO7bpguUM=
+github.com/hashicorp/vault-plugin-secrets-azure v0.17.2 h1:k1IQ6T5I+AkeEw0HI1yRsulCqfMUVm/S7T/gYIogXp0=
+github.com/hashicorp/vault-plugin-secrets-azure v0.17.2/go.mod h1:R4SSIIC5/NPpeV7GO1ZQ9z0cLUNufAAVi+oO7bpguUM=
github.com/hashicorp/vault-plugin-secrets-gcp v0.18.0 h1:RPKGn6Ai/t4QtdCWg9W7VYTe44cN3jDxgtobTsHHfqE=
github.com/hashicorp/vault-plugin-secrets-gcp v0.18.0/go.mod h1:b5ZdWNoPDo64g5mp16U6UVPTqCU3gKNIZ7Knc//uypg=
github.com/hashicorp/vault-plugin-secrets-gcpkms v0.16.0 h1:1wEYeplJl/9FLwBQSmfpqMdKKwmNz/b3e6K9ZOdJK/s=
@@ -2750,10 +2743,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
-github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
@@ -2762,13 +2753,10 @@ github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQs
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
-github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
-github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
-github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -2864,8 +2852,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
github.com/mediocregopher/radix/v4 v4.1.4 h1:Uze6DEbEAvL+VHXUEu/EDBTkUk5CLct5h3nVSGpc6Ts=
github.com/mediocregopher/radix/v4 v4.1.4/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE=
-github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo=
-github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4=
github.com/michaelklishin/rabbit-hole/v2 v2.12.0 h1:946p6jOYFcVJdtBBX8MwXvuBkpPjwm1Nm2Qg8oX+uFk=
github.com/michaelklishin/rabbit-hole/v2 v2.12.0/go.mod h1:AN/3zyz7d++OHf+4WUo/LR0+Q5nlPHMaXasIsG/mPY0=
github.com/microsoft/go-mssqldb v1.5.0 h1:CgENxkwtOBNj3Jg6T1X209y2blCfTTcwuOlznd2k9fk=
@@ -2977,7 +2963,6 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/mrunalp/fileutils v0.5.1/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs=
github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
@@ -2994,9 +2979,6 @@ github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9g
github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s=
github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
-github.com/nwaples/rardecode v1.1.2 h1:Cj0yZY6T1Zx1R7AhTbyGSALm44/Mmq+BAPc4B/p/d3M=
-github.com/nwaples/rardecode v1.1.2/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
@@ -3078,8 +3060,8 @@ github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04s
github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
-github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss=
-github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
+github.com/opencontainers/runc v1.2.0-rc.1 h1:SMjop2pxxYRTfKdsigna/8xRoaoCfIQfD2cVuOb64/o=
+github.com/opencontainers/runc v1.2.0-rc.1/go.mod h1:m9JwxfHzXz5YTTXBQr7EY9KTuazFAGPyMQx2nRR3vTw=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -3134,7 +3116,6 @@ github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk
github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
@@ -3409,10 +3390,6 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
-github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
-github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@@ -3455,8 +3432,6 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=
-github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
@@ -3685,8 +3660,8 @@ golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
-golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
-golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
+golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -3849,8 +3824,8 @@ golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
-golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
-golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -4077,8 +4052,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
-golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -4099,8 +4074,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
-golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
+golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -4122,8 +4097,9 @@ golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -4619,8 +4595,8 @@ google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw
google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -4635,8 +4611,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
-gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs=
-gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
diff --git a/helper/testhelpers/corehelpers/corehelpers.go b/helper/testhelpers/corehelpers/corehelpers.go
index cae4d5b019d4..e3071653b5b1 100644
--- a/helper/testhelpers/corehelpers/corehelpers.go
+++ b/helper/testhelpers/corehelpers/corehelpers.go
@@ -543,13 +543,20 @@ type TestLogger struct {
}
func NewTestLogger(t testing.T) *TestLogger {
+ return NewTestLoggerWithSuffix(t, "")
+}
+
+func NewTestLoggerWithSuffix(t testing.T, logFileSuffix string) *TestLogger {
var logFile *os.File
var logPath string
output := os.Stderr
logDir := os.Getenv("VAULT_TEST_LOG_DIR")
if logDir != "" {
- logPath = filepath.Join(logDir, t.Name()+".log")
+ if logFileSuffix != "" && !strings.HasPrefix(logFileSuffix, "_") {
+ logFileSuffix = "_" + logFileSuffix
+ }
+ logPath = filepath.Join(logDir, t.Name()+logFileSuffix+".log")
// t.Name may include slashes.
dir, _ := filepath.Split(logPath)
err := os.MkdirAll(dir, 0o755)
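The corehelpers change above adds a suffix-aware variant of `NewTestLogger` so tests that spin up several nodes can write one log file per node. A minimal usage sketch, assuming `TestLogger` keeps its embedded hclog methods (the node names here are hypothetical; the underscore separator is prepended automatically when missing):

```go
package corehelpers_test

import (
	"testing"

	"github.com/hashicorp/vault/helper/testhelpers/corehelpers"
)

// With VAULT_TEST_LOG_DIR set, each call below writes to its own file,
// e.g. "TestPerNodeLogs_core-0.log"; without it, output goes to stderr.
func TestPerNodeLogs(t *testing.T) {
	for _, node := range []string{"core-0", "core-1", "core-2"} {
		logger := corehelpers.NewTestLoggerWithSuffix(t, node)
		logger.Info("node logger ready", "node", node)
	}
}
```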
diff --git a/scripts/ci-helper.sh b/scripts/ci-helper.sh
index a87ba4305405..856a4391e3d3 100755
--- a/scripts/ci-helper.sh
+++ b/scripts/ci-helper.sh
@@ -123,8 +123,8 @@ function build() {
popd
}
-# Prepare legal requirements for packaging
-function prepare_legal() {
+# ENT: Prepare legal requirements for packaging
+function prepare_ent_legal() {
: "${PKG_NAME:="vault"}"
pushd "$(repo_root)"
@@ -137,6 +137,21 @@ function prepare_legal() {
popd
}
+# CE: Prepare legal requirements for packaging
+function prepare_ce_legal() {
+ : "${PKG_NAME:="vault"}"
+
+ pushd "$(repo_root)"
+
+ mkdir -p dist
+ cp LICENSE dist/LICENSE.txt
+
+ mkdir -p ".release/linux/package/usr/share/doc/$PKG_NAME"
+ cp LICENSE ".release/linux/package/usr/share/doc/$PKG_NAME/LICENSE.txt"
+
+ popd
+}
+
# Package version converts a vault version string into a compatible representation for system
# packages.
function version_package() {
@@ -161,8 +176,11 @@ function main() {
date)
build_date
;;
- prepare-legal)
- prepare_legal
+ prepare-ent-legal)
+ prepare_ent_legal
+ ;;
+ prepare-ce-legal)
+ prepare_ce_legal
;;
revision)
build_revision
diff --git a/sdk/go.mod b/sdk/go.mod
index 9ba252a1af14..bd44fb57b87c 100644
--- a/sdk/go.mod
+++ b/sdk/go.mod
@@ -7,7 +7,7 @@ require (
github.com/armon/go-metrics v0.4.1
github.com/armon/go-radix v1.0.0
github.com/cenkalti/backoff/v3 v3.2.2
- github.com/docker/docker v24.0.7+incompatible
+ github.com/docker/docker v25.0.5+incompatible
github.com/docker/go-connections v0.4.0
github.com/evanphx/json-patch/v5 v5.6.0
github.com/fatih/structs v1.1.0
@@ -46,11 +46,11 @@ require (
github.com/ryanuber/go-glob v1.0.0
github.com/stretchr/testify v1.8.4
go.uber.org/atomic v1.9.0
- golang.org/x/crypto v0.20.0
- golang.org/x/net v0.21.0
- golang.org/x/text v0.14.0
+ golang.org/x/crypto v0.23.0
+ golang.org/x/net v0.25.0
+ golang.org/x/text v0.15.0
google.golang.org/grpc v1.60.1
- google.golang.org/protobuf v1.31.0
+ google.golang.org/protobuf v1.34.1
)
require (
@@ -60,12 +60,16 @@ require (
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/containerd/containerd v1.7.12 // indirect
+ github.com/containerd/log v0.1.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
- github.com/docker/distribution v2.8.2+incompatible // indirect
+ github.com/distribution/reference v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
- github.com/fatih/color v1.14.1 // indirect
+ github.com/fatih/color v1.16.0 // indirect
+ github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/frankban/quicktest v1.14.0 // indirect
- github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
+ github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect
+ github.com/go-logr/logr v1.2.4 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/s2a-go v0.1.4 // indirect
@@ -81,30 +85,34 @@ require (
github.com/jackc/pgproto3/v2 v2.3.3 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgtype v1.14.0 // indirect
- github.com/jackc/pgx/v4 v4.18.2 // indirect
+ github.com/jackc/pgx/v4 v4.18.3 // indirect
github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 // indirect
github.com/klauspost/compress v1.16.5 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
- github.com/mattn/go-isatty v0.0.17 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/patternmatcher v0.5.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
+ github.com/moby/sys/user v0.1.0 // indirect
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/oklog/run v1.1.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
- github.com/opencontainers/runc v1.1.12 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/stretchr/objx v0.5.0 // indirect
go.opencensus.io v0.24.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
+ go.opentelemetry.io/otel v1.19.0 // indirect
+ go.opentelemetry.io/otel/metric v1.19.0 // indirect
+ go.opentelemetry.io/otel/trace v1.19.0 // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/oauth2 v0.13.0 // indirect
- golang.org/x/sys v0.17.0 // indirect
- golang.org/x/term v0.17.0 // indirect
+ golang.org/x/sys v0.20.0 // indirect
+ golang.org/x/term v0.20.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.10.0 // indirect
google.golang.org/api v0.134.0 // indirect
diff --git a/sdk/go.sum b/sdk/go.sum
index 7aa56276c4fc..b69fa2f0e8d5 100644
--- a/sdk/go.sum
+++ b/sdk/go.sum
@@ -71,6 +71,7 @@ github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZ
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -89,6 +90,8 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0=
github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@@ -97,10 +100,10 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
-github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
-github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE=
+github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -115,15 +118,18 @@ github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJ
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
-github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
+github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=
github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA=
+github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -136,6 +142,11 @@ github.com/go-ldap/ldap/v3 v3.4.4/go.mod h1:fe1MsuN5eJJ1FeLT/LEBVdWfNWKh459R7aXg
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
@@ -220,7 +231,9 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/hashicorp/cap/ldap v0.0.0-20230914221201-c4eecc7e31f7 h1:jgVdtp5YMn++PxnYhAFfrURfLf+nlqzBeddbvRG+tTg=
github.com/hashicorp/cap/ldap v0.0.0-20230914221201-c4eecc7e31f7/go.mod h1:q+c9XV1VqloZFZMu+zdvfb0cm7UrvKbvtmTF5wX5Q9o=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -339,8 +352,8 @@ github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
-github.com/jackc/pgx/v4 v4.18.2 h1:xVpYkNR5pk5bMCZGfClbO962UIqVABcAGt7ha1s/FeU=
-github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
+github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA=
+github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
@@ -390,8 +403,8 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
-github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microsoft/go-mssqldb v1.5.0 h1:CgENxkwtOBNj3Jg6T1X209y2blCfTTcwuOlznd2k9fk=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -416,6 +429,8 @@ github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M
github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
+github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
+github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -432,8 +447,6 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8=
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
-github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss=
-github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
@@ -471,8 +484,6 @@ github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThC
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
-github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y=
-github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
@@ -514,7 +525,19 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
+go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
+go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
+go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
+go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
+go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
+go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
+go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
@@ -543,8 +566,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg=
-golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
+golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -612,8 +635,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
-golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -681,13 +704,14 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
-golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -698,8 +722,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -858,8 +882,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/sdk/helper/certutil/types.go b/sdk/helper/certutil/types.go
index ca245d7d4208..bfdc153c4852 100644
--- a/sdk/helper/certutil/types.go
+++ b/sdk/helper/certutil/types.go
@@ -171,7 +171,7 @@ func GetPrivateKeyTypeFromPublicKey(pubKey crypto.PublicKey) PrivateKeyType {
return RSAPrivateKey
case *ecdsa.PublicKey:
return ECPrivateKey
- case *ed25519.PublicKey:
+ case ed25519.PublicKey:
return Ed25519PrivateKey
default:
return UnknownPrivateKey
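The one-line certutil fix above matters because `crypto/ed25519` hands out public keys by value (`ed25519.PublicKey` is a `[]byte`-based type), so the old `*ed25519.PublicKey` case never matched and Ed25519 keys fell through to `UnknownPrivateKey`. A standalone sketch of the behavior, outside the certutil package:

```go
package main

import (
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	var key crypto.PublicKey = pub // as callers pass keys into certutil
	switch key.(type) {
	case *ed25519.PublicKey:
		fmt.Println("pointer case") // never hit: GenerateKey returns a value
	case ed25519.PublicKey:
		fmt.Println("value case") // matches; this is what the fix switches to
	}
}
```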
diff --git a/sdk/helper/certutil/types_test.go b/sdk/helper/certutil/types_test.go
new file mode 100644
index 000000000000..2cf383afaa02
--- /dev/null
+++ b/sdk/helper/certutil/types_test.go
@@ -0,0 +1,63 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package certutil
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "testing"
+)
+
+func TestGetPrivateKeyTypeFromPublicKey(t *testing.T) {
+ rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Fatalf("error generating rsa key: %s", err)
+ }
+
+ ecdsaKey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+ if err != nil {
+ t.Fatalf("error generating ecdsa key: %s", err)
+ }
+
+ publicKey, _, err := ed25519.GenerateKey(rand.Reader)
+ if err != nil {
+ t.Fatalf("error generating ed25519 key: %s", err)
+ }
+
+ testCases := map[string]struct {
+ publicKey crypto.PublicKey
+ expectedKeyType PrivateKeyType
+ }{
+ "rsa": {
+ publicKey: rsaKey.Public(),
+ expectedKeyType: RSAPrivateKey,
+ },
+ "ecdsa": {
+ publicKey: ecdsaKey.Public(),
+ expectedKeyType: ECPrivateKey,
+ },
+ "ed25519": {
+ publicKey: publicKey,
+ expectedKeyType: Ed25519PrivateKey,
+ },
+ "bad key type": {
+ publicKey: []byte{},
+ expectedKeyType: UnknownPrivateKey,
+ },
+ }
+
+ for name, tt := range testCases {
+ t.Run(name, func(t *testing.T) {
+ keyType := GetPrivateKeyTypeFromPublicKey(tt.publicKey)
+
+ if keyType != tt.expectedKeyType {
+ t.Fatalf("key type mismatch: expected %s, got %s", tt.expectedKeyType, keyType)
+ }
+ })
+ }
+}
diff --git a/sdk/helper/docker/testhelpers.go b/sdk/helper/docker/testhelpers.go
index 40f269665da5..f1fefd65cd70 100644
--- a/sdk/helper/docker/testhelpers.go
+++ b/sdk/helper/docker/testhelpers.go
@@ -207,7 +207,7 @@ var _ io.Writer = &LogConsumerWriter{}
func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr bool, connect ServiceAdapter) (*Service, string, error) {
if d.RunOptions.PreDelete {
name := d.RunOptions.ContainerName
- matches, err := d.DockerAPI.ContainerList(ctx, types.ContainerListOptions{
+ matches, err := d.DockerAPI.ContainerList(ctx, container.ListOptions{
All: true,
// TODO use labels to ensure we don't delete anything we shouldn't
Filters: filters.NewArgs(
@@ -218,7 +218,7 @@ func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr
return nil, "", fmt.Errorf("failed to list containers named %q", name)
}
for _, cont := range matches {
- err = d.DockerAPI.ContainerRemove(ctx, cont.ID, types.ContainerRemoveOptions{Force: true})
+ err = d.DockerAPI.ContainerRemove(ctx, cont.ID, container.RemoveOptions{Force: true})
if err != nil {
return nil, "", fmt.Errorf("failed to pre-delete container named %q", name)
}
@@ -256,7 +256,7 @@ func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr
cleanup := func() {
for i := 0; i < 10; i++ {
- err := d.DockerAPI.ContainerRemove(ctx, result.Container.ID, types.ContainerRemoveOptions{Force: true})
+ err := d.DockerAPI.ContainerRemove(ctx, result.Container.ID, container.RemoveOptions{Force: true})
if err == nil || client.IsErrNotFound(err) {
return
}
@@ -328,7 +328,7 @@ func (d *Runner) createLogConsumer(containerId string, wg *sync.WaitGroup) func(
func (d *Runner) consumeLogs(containerId string, wg *sync.WaitGroup, logStdout, logStderr io.Writer) {
// We must run inside a goroutine because we're using Follow:true,
// and StdCopy will block until the log stream is closed.
- stream, err := d.DockerAPI.ContainerLogs(context.Background(), containerId, types.ContainerLogsOptions{
+ stream, err := d.DockerAPI.ContainerLogs(context.Background(), containerId, container.LogsOptions{
ShowStdout: true,
ShowStderr: true,
Timestamps: !d.RunOptions.OmitLogTimestamps,
@@ -434,20 +434,20 @@ func (d *Runner) Start(ctx context.Context, addSuffix, forceLocalAddr bool) (*St
for from, to := range d.RunOptions.CopyFromTo {
if err := copyToContainer(ctx, d.DockerAPI, c.ID, from, to); err != nil {
- _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{})
+ _ = d.DockerAPI.ContainerRemove(ctx, c.ID, container.RemoveOptions{})
return nil, err
}
}
- err = d.DockerAPI.ContainerStart(ctx, c.ID, types.ContainerStartOptions{})
+ err = d.DockerAPI.ContainerStart(ctx, c.ID, container.StartOptions{})
if err != nil {
- _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{})
+ _ = d.DockerAPI.ContainerRemove(ctx, c.ID, container.RemoveOptions{})
return nil, fmt.Errorf("container start failed: %v", err)
}
inspect, err := d.DockerAPI.ContainerInspect(ctx, c.ID)
if err != nil {
- _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{})
+ _ = d.DockerAPI.ContainerRemove(ctx, c.ID, container.RemoveOptions{})
return nil, err
}
@@ -492,7 +492,7 @@ func (d *Runner) RefreshFiles(ctx context.Context, containerID string) error {
for from, to := range d.RunOptions.CopyFromTo {
if err := copyToContainer(ctx, d.DockerAPI, containerID, from, to); err != nil {
// TODO too drastic?
- _ = d.DockerAPI.ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{})
+ _ = d.DockerAPI.ContainerRemove(ctx, containerID, container.RemoveOptions{})
return err
}
}
@@ -534,7 +534,7 @@ func (d *Runner) RestartContainerWithTimeout(ctx context.Context, containerID st
}
func (d *Runner) Restart(ctx context.Context, containerID string) error {
- if err := d.DockerAPI.ContainerStart(ctx, containerID, types.ContainerStartOptions{}); err != nil {
+ if err := d.DockerAPI.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil {
return err
}
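The testhelpers changes above track the docker/docker v25 client, where the deprecated `types.Container*Options` structs moved to the `api/types/container` package. A minimal standalone sketch of the new call shapes (the container name filter here is illustrative, not from the patch):

```go
package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	// ContainerList now takes container.ListOptions instead of
	// types.ContainerListOptions.
	matches, err := cli.ContainerList(ctx, container.ListOptions{
		All:     true,
		Filters: filters.NewArgs(filters.Arg("name", "vault-test")),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range matches {
		// container.RemoveOptions replaces types.ContainerRemoveOptions.
		if err := cli.ContainerRemove(ctx, c.ID, container.RemoveOptions{Force: true}); err != nil {
			log.Printf("remove %s: %v", c.ID, err)
		}
	}
}
```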
diff --git a/sdk/helper/testcluster/docker/environment.go b/sdk/helper/testcluster/docker/environment.go
index 068dea424ece..e0d9b72c153b 100644
--- a/sdk/helper/testcluster/docker/environment.go
+++ b/sdk/helper/testcluster/docker/environment.go
@@ -555,7 +555,13 @@ func (n *DockerClusterNode) apiConfig() (*api.Config, error) {
if config.Error != nil {
return nil, config.Error
}
- config.Address = fmt.Sprintf("https://%s", n.HostPort)
+
+ protocol := "https"
+ if n.tlsConfig == nil {
+ protocol = "http"
+ }
+ config.Address = fmt.Sprintf("%s://%s", protocol, n.HostPort)
+
config.HttpClient = client
config.MaxRetries = 0
return config, nil
@@ -618,6 +624,16 @@ func (n *DockerClusterNode) createDefaultListenerConfig() map[string]interface{}
}}
}
+func (n *DockerClusterNode) createTLSDisabledListenerConfig() map[string]interface{} {
+ return map[string]interface{}{"tcp": map[string]interface{}{
+ "address": fmt.Sprintf("%s:%d", "0.0.0.0", 8200),
+ "telemetry": map[string]interface{}{
+ "unauthenticated_metrics_access": true,
+ },
+ "tls_disable": true,
+ }}
+}
+
func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOptions) error {
if n.DataVolumeName == "" {
vol, err := n.DockerAPI.VolumeCreate(ctx, volume.CreateOptions{})
@@ -631,7 +647,15 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
}
vaultCfg := map[string]interface{}{}
var listenerConfig []map[string]interface{}
- listenerConfig = append(listenerConfig, n.createDefaultListenerConfig())
+
+ var defaultListenerConfig map[string]interface{}
+ if opts.DisableTLS {
+ defaultListenerConfig = n.createTLSDisabledListenerConfig()
+ } else {
+ defaultListenerConfig = n.createDefaultListenerConfig()
+ }
+
+ listenerConfig = append(listenerConfig, defaultListenerConfig)
ports := []string{"8200/tcp", "8201/tcp"}
if opts.VaultNodeConfig != nil && opts.VaultNodeConfig.AdditionalListeners != nil {
@@ -683,7 +707,12 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
//// disable_mlock is required for working in the Docker environment with
//// custom plugins
vaultCfg["disable_mlock"] = true
- vaultCfg["api_addr"] = `https://{{- GetAllInterfaces | exclude "flags" "loopback" | attr "address" -}}:8200`
+
+ protocol := "https"
+ if opts.DisableTLS {
+ protocol = "http"
+ }
+ vaultCfg["api_addr"] = fmt.Sprintf(`%s://{{- GetAllInterfaces | exclude "flags" "loopback" | attr "address" -}}:8200`, protocol)
vaultCfg["cluster_addr"] = `https://{{- GetAllInterfaces | exclude "flags" "loopback" | attr "address" -}}:8201`
vaultCfg["administrative_namespace_path"] = opts.AdministrativeNamespacePath
@@ -722,10 +751,12 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
}
}
- // Create a temporary cert so vault will start up
- err = n.setupCert("127.0.0.1")
- if err != nil {
- return err
+ if !opts.DisableTLS {
+ // Create a temporary cert so vault will start up
+ err = n.setupCert("127.0.0.1")
+ if err != nil {
+ return err
+ }
}
caDir := filepath.Join(n.Cluster.tmpDir, "ca")
@@ -753,6 +784,27 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
testcluster.JSONLogNoTimestamp(n.Logger, s)
}}
+ postStartFunc := func(containerID string, realIP string) error {
+ err := n.setupCert(realIP)
+ if err != nil {
+ return err
+ }
+
+ // If we signal Vault before it installs its sighup handler, it'll die.
+ wg.Wait()
+ n.Logger.Trace("running poststart", "containerID", containerID, "IP", realIP)
+ return n.runner.RefreshFiles(ctx, containerID)
+ }
+
+ if opts.DisableTLS {
+ postStartFunc = func(containerID string, realIP string) error {
+ // If we signal Vault before it installs its sighup handler, it'll die.
+ wg.Wait()
+ n.Logger.Trace("running poststart", "containerID", containerID, "IP", realIP)
+ return n.runner.RefreshFiles(ctx, containerID)
+ }
+ }
+
r, err := dockhelper.NewServiceRunner(dockhelper.RunOptions{
ImageRepo: n.ImageRepo,
ImageTag: n.ImageTag,
@@ -767,26 +819,16 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
"VAULT_LOG_FORMAT=json",
"VAULT_LICENSE=" + opts.VaultLicense,
},
- Ports: ports,
- ContainerName: n.Name(),
- NetworkName: opts.NetworkName,
- CopyFromTo: copyFromTo,
- LogConsumer: logConsumer,
- LogStdout: logStdout,
- LogStderr: logStderr,
- PreDelete: true,
- DoNotAutoRemove: true,
- PostStart: func(containerID string, realIP string) error {
- err := n.setupCert(realIP)
- if err != nil {
- return err
- }
-
- // If we signal Vault before it installs its sighup handler, it'll die.
- wg.Wait()
- n.Logger.Trace("running poststart", "containerID", containerID, "IP", realIP)
- return n.runner.RefreshFiles(ctx, containerID)
- },
+ Ports: ports,
+ ContainerName: n.Name(),
+ NetworkName: opts.NetworkName,
+ CopyFromTo: copyFromTo,
+ LogConsumer: logConsumer,
+ LogStdout: logStdout,
+ LogStderr: logStderr,
+ PreDelete: true,
+ DoNotAutoRemove: true,
+ PostStart: postStartFunc,
Capabilities: []string{"NET_ADMIN"},
OmitLogTimestamps: true,
VolumeNameToMountPoint: map[string]string{
@@ -810,7 +852,7 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
if err != nil {
return nil, err
}
- config.Address = fmt.Sprintf("https://%s:%d", host, port)
+ config.Address = fmt.Sprintf("%s://%s:%d", protocol, host, port)
client, err := api.NewClient(config)
if err != nil {
return nil, err
@@ -841,7 +883,7 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
}
n.ContainerNetworkName = netName
n.ContainerIPAddress = svc.Container.NetworkSettings.Networks[netName].IPAddress
- n.RealAPIAddr = "https://" + n.ContainerIPAddress + ":8200"
+ n.RealAPIAddr = protocol + "://" + n.ContainerIPAddress + ":8200"
n.cleanupContainer = svc.Cleanup
client, err := n.newAPIClient()
@@ -1048,6 +1090,7 @@ type DockerClusterOptions struct {
Args []string
StartProbe func(*api.Client) error
Storage testcluster.ClusterStorage
+ DisableTLS bool
}
func ensureLeaderMatches(ctx context.Context, client *api.Client, ready func(response *api.LeaderResponse) error) error {
@@ -1100,13 +1143,15 @@ func (dc *DockerCluster) setupDockerCluster(ctx context.Context, opts *DockerClu
numCores = opts.NumCores
}
- if dc.CA == nil {
- if err := dc.setupCA(opts); err != nil {
- return err
+ if !opts.DisableTLS {
+ if dc.CA == nil {
+ if err := dc.setupCA(opts); err != nil {
+ return err
+ }
}
+ dc.RootCAs = x509.NewCertPool()
+ dc.RootCAs.AddCert(dc.CA.CACert)
}
- dc.RootCAs = x509.NewCertPool()
- dc.RootCAs.AddCert(dc.CA.CACert)
if dc.storage != nil {
if err := dc.storage.Start(ctx, &opts.ClusterOptions); err != nil {
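The environment.go changes above thread the new `DisableTLS` option through the listener config, `api_addr`, the `PostStart` hook, and CA setup. A hypothetical test sketching how a caller might opt in, assuming the existing `NewTestDockerCluster` constructor and `Nodes`/`APIClient` accessors (the image coordinates are placeholders):

```go
package docker_test

import (
	"testing"

	"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
)

// TestClusterWithoutTLS is a sketch, not part of this change: with
// DisableTLS set, the node listener uses tls_disable, no temporary
// certificate is generated, and the API address is served over http.
func TestClusterWithoutTLS(t *testing.T) {
	opts := &docker.DockerClusterOptions{
		ImageRepo:  "hashicorp/vault",
		ImageTag:   "latest",
		DisableTLS: true,
	}
	cluster := docker.NewTestDockerCluster(t, opts)
	defer cluster.Cleanup()

	client := cluster.Nodes()[0].APIClient()
	t.Log("API address:", client.Address()) // expected to begin with http://
}
```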
diff --git a/tools/tools.sh b/tools/tools.sh
index 16a117b0efd7..a3007c8c1d9e 100755
--- a/tools/tools.sh
+++ b/tools/tools.sh
@@ -42,7 +42,7 @@ install_external() {
github.com/golangci/revgrep/cmd/revgrep@latest
golang.org/x/tools/cmd/goimports@latest
google.golang.org/protobuf/cmd/protoc-gen-go@latest
- google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
+ google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0
gotest.tools/gotestsum@latest
honnef.co/go/tools/cmd/staticcheck@latest
mvdan.cc/gofumpt@latest
diff --git a/ui/app/components/policy-form.hbs b/ui/app/components/policy-form.hbs
index 6545348de00e..f3d44d7b9f7a 100644
--- a/ui/app/components/policy-form.hbs
+++ b/ui/app/components/policy-form.hbs
@@ -60,7 +60,7 @@
@isIconOnly={{true}}
@textToCopy={{@model.policy}}
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
class="transparent"
data-test-copy-button
diff --git a/ui/app/components/sidebar/user-menu.hbs b/ui/app/components/sidebar/user-menu.hbs
index c881e952a2db..908e6795c52f 100644
--- a/ui/app/components/sidebar/user-menu.hbs
+++ b/ui/app/components/sidebar/user-menu.hbs
@@ -55,9 +55,7 @@
class="in-dropdown link is-flex-start"
@onSuccess={{(fn (set-flash-message "Token copied!"))}}
@onError={{(fn
- (set-flash-message
- "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger"
- )
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
data-test-copy-button={{this.auth.currentToken}}
/>
diff --git a/ui/app/models/secret-engine.js b/ui/app/models/secret-engine.js
index b11b379af012..3171de36f593 100644
--- a/ui/app/models/secret-engine.js
+++ b/ui/app/models/secret-engine.js
@@ -148,6 +148,10 @@ export default class SecretEngineModel extends Model {
const { engineRoute } = allEngines().findBy('type', this.engineType);
return `vault.cluster.secrets.backend.${engineRoute}`;
}
+ if (this.isV2KV) {
+ // if the engine is KV v2 but not registered as an addon, its type is generic
+ return 'vault.cluster.secrets.backend.kv.list';
+ }
return `vault.cluster.secrets.backend.list-root`;
}
diff --git a/ui/app/routes/vault/cluster.js b/ui/app/routes/vault/cluster.js
index e4b2cf1a1da7..7ec85364d79e 100644
--- a/ui/app/routes/vault/cluster.js
+++ b/ui/app/routes/vault/cluster.js
@@ -121,6 +121,10 @@ export default Route.extend(ModelBoundaryRoute, ClusterRoute, {
afterModel(model, transition) {
this._super(...arguments);
this.currentCluster.setCluster(model);
+ if (model.needsInit && this.auth.currentToken) {
+ // clear the token to prevent an infinite loading state
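+ // (a leftover token would otherwise keep the route loading against the uninitialized cluster)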
+ this.auth.deleteCurrentToken();
+ }
// Check that namespaces is enabled and if not,
// clear the namespace by transition to this route w/o it
diff --git a/ui/app/routes/vault/cluster/secrets/backend/list.js b/ui/app/routes/vault/cluster/secrets/backend/list.js
index c1db7504a545..2817e730b619 100644
--- a/ui/app/routes/vault/cluster/secrets/backend/list.js
+++ b/ui/app/routes/vault/cluster/secrets/backend/list.js
@@ -82,6 +82,9 @@ export default Route.extend({
return this.router.transitionTo('vault.cluster.secrets.backend.kv.list-directory', backend, secret);
}
return this.router.transitionTo(`vault.cluster.secrets.backend.${engineRoute}`, backend);
+ } else if (secretEngine.isV2KV) {
+ // if the engine is KV v2 but not registered as an addon, its type is generic
+ return this.router.transitionTo('vault.cluster.secrets.backend.kv.list', backend);
}
const modelType = this.getModelType(backend, tab);
return this.pathHelp.getNewModel(modelType, backend).then(() => {
diff --git a/ui/app/templates/components/configure-ssh-secret.hbs b/ui/app/templates/components/configure-ssh-secret.hbs
index b7eb3e91fb43..d8ef22eba208 100644
--- a/ui/app/templates/components/configure-ssh-secret.hbs
+++ b/ui/app/templates/components/configure-ssh-secret.hbs
@@ -26,9 +26,7 @@
\ No newline at end of file
diff --git a/ui/app/templates/components/console/log-list.hbs b/ui/app/templates/components/console/log-list.hbs
index ace5f2cdb3d7..a92c133e7b20 100644
--- a/ui/app/templates/components/console/log-list.hbs
+++ b/ui/app/templates/components/console/log-list.hbs
@@ -15,9 +15,7 @@
@text="Copy"
@isIconOnly={{true}}
@textToCopy={{multi-line-join this.list}}
- @onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
- )}}
+ @onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
class="transparent icon-grey-500 top-right-absolute"
/>
\ No newline at end of file
diff --git a/ui/app/templates/components/console/log-object.hbs b/ui/app/templates/components/console/log-object.hbs
index c2c3bf21224f..c6d4fb3c26b6 100644
--- a/ui/app/templates/components/console/log-object.hbs
+++ b/ui/app/templates/components/console/log-object.hbs
@@ -9,9 +9,7 @@
@text="Copy"
@isIconOnly={{true}}
@textToCopy={{this.columns}}
- @onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
- )}}
+ @onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
class="transparent icon-grey-500 top-right-absolute"
/>
\ No newline at end of file
diff --git a/ui/app/templates/components/console/log-text.hbs b/ui/app/templates/components/console/log-text.hbs
index dc16eef8b49e..22f1c334f89d 100644
--- a/ui/app/templates/components/console/log-text.hbs
+++ b/ui/app/templates/components/console/log-text.hbs
@@ -9,9 +9,7 @@
@text="Copy"
@isIconOnly={{true}}
@textToCopy={{@content}}
- @onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
- )}}
+ @onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
class="transparent icon-grey-500 top-right-absolute"
/>
\ No newline at end of file
diff --git a/ui/app/templates/components/control-group-success.hbs b/ui/app/templates/components/control-group-success.hbs
index 4946ee5329f9..fd79e5ec2d17 100644
--- a/ui/app/templates/components/control-group-success.hbs
+++ b/ui/app/templates/components/control-group-success.hbs
@@ -30,7 +30,7 @@
@isIconOnly={{true}}
@textToCopy={{stringify this.unwrapData}}
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
class="transparent top-right-absolute"
/>
diff --git a/ui/app/templates/components/control-group.hbs b/ui/app/templates/components/control-group.hbs
index bb33c26435cf..bdc93a73e1fe 100644
--- a/ui/app/templates/components/control-group.hbs
+++ b/ui/app/templates/components/control-group.hbs
@@ -54,7 +54,7 @@
@isIconOnly={{true}}
@textToCopy={{this.model.id}}
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
class="transparent top-right-absolute"
/>
@@ -103,7 +103,7 @@
@textToCopy={{this.controlGroupResponse.token}}
@color="secondary"
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
/>
diff --git a/ui/app/templates/components/generate-credentials.hbs b/ui/app/templates/components/generate-credentials.hbs
index 12b1939dec85..8ab1ca3f28e2 100644
--- a/ui/app/templates/components/generate-credentials.hbs
+++ b/ui/app/templates/components/generate-credentials.hbs
@@ -84,9 +84,7 @@
@@ -96,7 +94,7 @@
@text="Copy Lease ID"
@textToCopy={{this.model.leaseId}}
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
class="secondary"
/>
diff --git a/ui/app/templates/components/tool-hash.hbs b/ui/app/templates/components/tool-hash.hbs
index 0739d66d69da..b1eed813cfae 100644
--- a/ui/app/templates/components/tool-hash.hbs
+++ b/ui/app/templates/components/tool-hash.hbs
@@ -25,9 +25,7 @@
diff --git a/ui/app/templates/components/tool-random.hbs b/ui/app/templates/components/tool-random.hbs
index 83777b1ef08c..ac0556a06ab1 100644
--- a/ui/app/templates/components/tool-random.hbs
+++ b/ui/app/templates/components/tool-random.hbs
@@ -21,9 +21,7 @@
diff --git a/ui/app/templates/components/tool-rewrap.hbs b/ui/app/templates/components/tool-rewrap.hbs
index 96d9cc23a88d..223a60a429e2 100644
--- a/ui/app/templates/components/tool-rewrap.hbs
+++ b/ui/app/templates/components/tool-rewrap.hbs
@@ -32,9 +32,7 @@
diff --git a/ui/app/templates/components/tool-unwrap.hbs b/ui/app/templates/components/tool-unwrap.hbs
index 0a96118930c0..0aa14feae2d3 100644
--- a/ui/app/templates/components/tool-unwrap.hbs
+++ b/ui/app/templates/components/tool-unwrap.hbs
@@ -46,9 +46,7 @@
diff --git a/ui/app/templates/components/tool-wrap.hbs b/ui/app/templates/components/tool-wrap.hbs
index 981d5d101770..ef28aa91bac1 100644
--- a/ui/app/templates/components/tool-wrap.hbs
+++ b/ui/app/templates/components/tool-wrap.hbs
@@ -32,9 +32,7 @@
diff --git a/ui/app/templates/components/transit-key-action/datakey.hbs b/ui/app/templates/components/transit-key-action/datakey.hbs
index 787c1ed2a1f7..d10d1b28eb48 100644
--- a/ui/app/templates/components/transit-key-action/datakey.hbs
+++ b/ui/app/templates/components/transit-key-action/datakey.hbs
@@ -87,7 +87,7 @@
@color="secondary"
@container="#transit-datakey-modal"
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
/>
{{/if}}
@@ -96,9 +96,7 @@
@textToCopy={{@ciphertext}}
@color="secondary"
@container="#transit-datakey-modal"
- @onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
- )}}
+ @onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
/>
diff --git a/ui/app/templates/components/transit-key-action/decrypt.hbs b/ui/app/templates/components/transit-key-action/decrypt.hbs
index 8b45ceaaaf50..2f218bdf3d58 100644
--- a/ui/app/templates/components/transit-key-action/decrypt.hbs
+++ b/ui/app/templates/components/transit-key-action/decrypt.hbs
@@ -65,9 +65,7 @@
@textToCopy={{@plaintext}}
@color="secondary"
@container="#transit-decrypt-modal"
- @onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
- )}}
+ @onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
data-test-encrypted-value="plaintext"
/>
diff --git a/ui/app/templates/components/transit-key-action/encrypt.hbs b/ui/app/templates/components/transit-key-action/encrypt.hbs
index cd2a2dc62acf..409090d745d6 100644
--- a/ui/app/templates/components/transit-key-action/encrypt.hbs
+++ b/ui/app/templates/components/transit-key-action/encrypt.hbs
@@ -78,9 +78,7 @@
@textToCopy={{@ciphertext}}
@color="secondary"
@container="#transit-encrypt-modal"
- @onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
- )}}
+ @onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
data-test-encrypted-value="ciphertext"
/>
diff --git a/ui/app/templates/components/transit-key-action/export.hbs b/ui/app/templates/components/transit-key-action/export.hbs
index 83e7c2bf2d4f..ab82fc658f3d 100644
--- a/ui/app/templates/components/transit-key-action/export.hbs
+++ b/ui/app/templates/components/transit-key-action/export.hbs
@@ -77,7 +77,7 @@
@color="secondary"
@container="#transit-export-modal"
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
data-test-encrypted-value="export"
/>
@@ -92,7 +92,7 @@
@textToCopy={{stringify @keys}}
@container="#transit-export-modal"
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
class="transparent top-right-absolute"
/>
diff --git a/ui/app/templates/components/transit-key-action/hmac.hbs b/ui/app/templates/components/transit-key-action/hmac.hbs
index 4e76fdba388f..70f7a74f1b12 100644
--- a/ui/app/templates/components/transit-key-action/hmac.hbs
+++ b/ui/app/templates/components/transit-key-action/hmac.hbs
@@ -60,9 +60,7 @@
@textToCopy={{@hmac}}
@color="secondary"
@container="#transit-hmac-modal"
- @onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
- )}}
+ @onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
data-test-encrypted-value="hmac"
/>
diff --git a/ui/app/templates/components/transit-key-action/rewrap.hbs b/ui/app/templates/components/transit-key-action/rewrap.hbs
index 1bdf549a7a73..e55cf4b93444 100644
--- a/ui/app/templates/components/transit-key-action/rewrap.hbs
+++ b/ui/app/templates/components/transit-key-action/rewrap.hbs
@@ -72,9 +72,7 @@
@textToCopy={{@ciphertext}}
@color="secondary"
@container="#transit-rewrap-modal"
- @onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
- )}}
+ @onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
/>
diff --git a/ui/app/templates/components/transit-key-action/sign.hbs b/ui/app/templates/components/transit-key-action/sign.hbs
index 83ad590ef341..be91314e58ac 100644
--- a/ui/app/templates/components/transit-key-action/sign.hbs
+++ b/ui/app/templates/components/transit-key-action/sign.hbs
@@ -129,9 +129,7 @@
@textToCopy={{@signature}}
@color="secondary"
@container="#transit-sign-modal"
- @onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
- )}}
+ @onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
data-test-encrypted-value="signature"
/>
diff --git a/ui/app/templates/vault/cluster/secrets/backend/sign.hbs b/ui/app/templates/vault/cluster/secrets/backend/sign.hbs
index 96fd787dcc54..91f19600b649 100644
--- a/ui/app/templates/vault/cluster/secrets/backend/sign.hbs
+++ b/ui/app/templates/vault/cluster/secrets/backend/sign.hbs
@@ -50,9 +50,7 @@
@@ -62,7 +60,7 @@
@text="Copy lease ID"
@textToCopy={{this.model.leaseId}}
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
class="secondary"
/>
diff --git a/ui/lib/core/addon/components/certificate-card.hbs b/ui/lib/core/addon/components/certificate-card.hbs
index 65ac9cd04ca9..63283640166a 100644
--- a/ui/lib/core/addon/components/certificate-card.hbs
+++ b/ui/lib/core/addon/components/certificate-card.hbs
@@ -25,9 +25,7 @@
@text="Copy"
@isIconOnly={{true}}
@textToCopy={{this.copyValue}}
- @onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
- )}}
+ @onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
class="transparent"
data-test-copy-button={{or this.copyValue true}}
/>
diff --git a/ui/lib/core/addon/components/choose-pgp-key-form.hbs b/ui/lib/core/addon/components/choose-pgp-key-form.hbs
index a799ca736079..ee66c580681c 100644
--- a/ui/lib/core/addon/components/choose-pgp-key-form.hbs
+++ b/ui/lib/core/addon/components/choose-pgp-key-form.hbs
@@ -26,9 +26,8 @@
class="has-bottom-margin-s"
@textToCopy={{this.pgpKey}}
@color="secondary"
- @onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
- )}}
+ @onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
+ @isTruncated={{true}}
data-test-pgp-key-copy
@container="#shamir-flow-modal"
/>
diff --git a/ui/lib/core/addon/components/copy-secret-dropdown.hbs b/ui/lib/core/addon/components/copy-secret-dropdown.hbs
index 46fe99090a23..3b1258cb1f9d 100644
--- a/ui/lib/core/addon/components/copy-secret-dropdown.hbs
+++ b/ui/lib/core/addon/components/copy-secret-dropdown.hbs
@@ -18,7 +18,7 @@
@textToCopy={{@clipboardText}}
@isFullWidth={{true}}
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
class="in-dropdown link is-flex-start"
{{on "click" (action (set-flash-message "JSON Copied!"))}}
diff --git a/ui/lib/core/addon/components/filter-input-explicit.hbs b/ui/lib/core/addon/components/filter-input-explicit.hbs
new file mode 100644
index 000000000000..2cf1f2ed2935
--- /dev/null
+++ b/ui/lib/core/addon/components/filter-input-explicit.hbs
@@ -0,0 +1,19 @@
+{{!
+ Copyright (c) HashiCorp, Inc.
+ SPDX-License-Identifier: BUSL-1.1
+~}}
+
+
\ No newline at end of file
diff --git a/ui/lib/core/addon/components/info-table-row.hbs b/ui/lib/core/addon/components/info-table-row.hbs
index 7850fdf73e1b..c7c5cd121768 100644
--- a/ui/lib/core/addon/components/info-table-row.hbs
+++ b/ui/lib/core/addon/components/info-table-row.hbs
@@ -41,7 +41,7 @@
@isIconOnly={{true}}
@textToCopy={{@value}}
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
class="transparent has-padding-xxs"
data-test-copy-button={{@value}}
@@ -98,9 +98,7 @@
@isIconOnly={{true}}
@textToCopy={{@tooltipText}}
@onError={{(fn
- (set-flash-message
- "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger"
- )
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
class="transparent white-icon"
data-test-tooltip-copy={{@tooltipText}}
diff --git a/ui/lib/core/addon/components/json-editor.hbs b/ui/lib/core/addon/components/json-editor.hbs
index e16bc06c4e67..2a50ccd172e3 100644
--- a/ui/lib/core/addon/components/json-editor.hbs
+++ b/ui/lib/core/addon/components/json-editor.hbs
@@ -40,7 +40,7 @@
@isIconOnly={{true}}
@textToCopy={{@value}}
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
class="transparent"
data-test-copy-button={{@value}}
diff --git a/ui/lib/core/addon/components/masked-input.hbs b/ui/lib/core/addon/components/masked-input.hbs
index 86af1df79914..eebc6d46f8c8 100644
--- a/ui/lib/core/addon/components/masked-input.hbs
+++ b/ui/lib/core/addon/components/masked-input.hbs
@@ -40,9 +40,7 @@
@text="Copy"
@isIconOnly={{true}}
@textToCopy={{this.copyValue}}
- @onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
- )}}
+ @onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
class="transparent has-padding-xxs"
data-test-copy-button={{or this.copyValue true}}
/>
diff --git a/ui/lib/core/addon/components/shamir/dr-token-flow.hbs b/ui/lib/core/addon/components/shamir/dr-token-flow.hbs
index e9b4af619353..159bdb2b82c1 100644
--- a/ui/lib/core/addon/components/shamir/dr-token-flow.hbs
+++ b/ui/lib/core/addon/components/shamir/dr-token-flow.hbs
@@ -18,9 +18,7 @@
@@ -36,7 +34,7 @@
@textToCopy={{this.otp}}
@container="#shamir-flow-modal"
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
/>
@@ -96,6 +94,7 @@
{{else if this.generateWithPGP}}
+
diff --git a/ui/lib/core/addon/utils/advanced-secret.js b/ui/lib/core/addon/utils/advanced-secret.js
index d17ba71e762e..267ab22c75ef 100644
--- a/ui/lib/core/addon/utils/advanced-secret.js
+++ b/ui/lib/core/addon/utils/advanced-secret.js
@@ -30,6 +30,11 @@ export function obfuscateData(obj) {
for (const key of Object.keys(obj)) {
if (Array.isArray(obj[key])) {
newObj[key] = obj[key].map(() => '********');
+ } else if (obj[key] === null) {
+ // In JavaScript, typeof null returns 'object' (a historical bug that
+ // will never be fixed), so handle null here before the object branch
+ // below recurses into it.
+ newObj[key] = '********';
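+ // e.g. { blue: null } is masked to { blue: '********' } instead of being recursed into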
} else if (typeof obj[key] === 'object') {
newObj[key] = obfuscateData(obj[key]);
} else {
diff --git a/ui/lib/core/app/components/filter-input-explicit.js b/ui/lib/core/app/components/filter-input-explicit.js
new file mode 100644
index 000000000000..1327ecdafc62
--- /dev/null
+++ b/ui/lib/core/app/components/filter-input-explicit.js
@@ -0,0 +1,6 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: BUSL-1.1
+ */
+
+export { default } from 'core/components/filter-input-explicit';
diff --git a/ui/lib/kmip/addon/templates/credentials/show.hbs b/ui/lib/kmip/addon/templates/credentials/show.hbs
index 97245e6505b7..abe90e1294d7 100644
--- a/ui/lib/kmip/addon/templates/credentials/show.hbs
+++ b/ui/lib/kmip/addon/templates/credentials/show.hbs
@@ -32,9 +32,7 @@
diff --git a/ui/lib/kubernetes/addon/components/page/roles.hbs b/ui/lib/kubernetes/addon/components/page/roles.hbs
index e1ade3fa6115..810b45ae5f54 100644
--- a/ui/lib/kubernetes/addon/components/page/roles.hbs
+++ b/ui/lib/kubernetes/addon/components/page/roles.hbs
@@ -6,8 +6,11 @@
{{#unless @promptConfig}}
diff --git a/ui/lib/kubernetes/addon/components/page/roles.js b/ui/lib/kubernetes/addon/components/page/roles.js
index 8caf60148e42..6de4188d12c4 100644
--- a/ui/lib/kubernetes/addon/components/page/roles.js
+++ b/ui/lib/kubernetes/addon/components/page/roles.js
@@ -4,35 +4,68 @@
*/
import Component from '@glimmer/component';
-import { inject as service } from '@ember/service';
+import { service } from '@ember/service';
import { action } from '@ember/object';
import { getOwner } from '@ember/application';
import errorMessage from 'vault/utils/error-message';
import { tracked } from '@glimmer/tracking';
+import keys from 'core/utils/key-codes';
/**
* @module Roles
- * RolesPage component is a child component to show list of roles
+ * RolesPage is a child component that renders the list of roles.
+ * It also handles the role-filtering actions.
*
* @param {array} roles - array of roles
* @param {boolean} promptConfig - whether or not to display config cta
- * @param {array} pageFilter - array of filtered roles
+ * @param {string} filterValue - value of the pageFilter query param
* @param {array} breadcrumbs - breadcrumbs as an array of objects that contain label and route
*/
export default class RolesPageComponent extends Component {
@service flashMessages;
+ @service router;
+ @tracked query;
@tracked roleToDelete = null;
+ constructor() {
+ super(...arguments);
+ this.query = this.args.filterValue;
+ }
+
get mountPoint() {
return getOwner(this).mountPoint;
}
+ navigate(pageFilter) {
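+ // with no argument, pageFilter resets to null, which clears the query param from the URL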
+ const route = `${this.mountPoint}.roles.index`;
+ const args = [route, { queryParams: { pageFilter: pageFilter || null } }];
+ this.router.transitionTo(...args);
+ }
+
+ @action
+ handleKeyDown(event) {
+ if (event.keyCode === keys.ESC) {
+ // On escape, transition to roles index route.
+ this.navigate();
+ }
+ // ignore all other key events
+ }
+
+ @action handleInput(evt) {
+ this.query = evt.target.value;
+ }
+
+ @action
+ handleSearch(evt) {
+ evt.preventDefault();
+ this.navigate(this.query);
+ }
+
@action
async onDelete(model) {
try {
const message = `Successfully deleted role ${model.name}`;
await model.destroyRecord();
- this.args.roles.removeObject(model);
this.flashMessages.success(message);
} catch (error) {
const message = errorMessage(error, 'Error deleting role. Please try again or contact support');
diff --git a/ui/lib/kubernetes/addon/components/tab-page-header.hbs b/ui/lib/kubernetes/addon/components/tab-page-header.hbs
index 6b6fc74a31a3..cc765170abcc 100644
--- a/ui/lib/kubernetes/addon/components/tab-page-header.hbs
+++ b/ui/lib/kubernetes/addon/components/tab-page-header.hbs
@@ -28,10 +28,12 @@
{{#if @filterRoles}}
-
{{/if}}
diff --git a/ui/lib/kv/addon/components/kv-data-fields.js b/ui/lib/kv/addon/components/kv-data-fields.js
index 4ffd7ab574d4..e1902f82fbbb 100644
--- a/ui/lib/kv/addon/components/kv-data-fields.js
+++ b/ui/lib/kv/addon/components/kv-data-fields.js
@@ -29,8 +29,16 @@ import { stringify } from 'core/helpers/stringify';
export default class KvDataFields extends Component {
@tracked lintingErrors;
+ get startingValue() {
+ // Pass the third "space" param to JSON.stringify so the object is structured with whitespace;
+ // otherwise the codemirror modifier check `this._editor.getValue() !== namedArgs.content` passes and _setValue is called.
+ // _setValue moves the cursor to the beginning of the text field,
+ // so the cursor would jump there after the first key input.
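+ // e.g. JSON.stringify({ '': '' }, null, 2) yields "{\n  \"\": \"\"\n}" rather than '{"":""}'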
+ return JSON.stringify({ '': '' }, null, 2);
+ }
+
get stringifiedSecretData() {
- return this.args.secret?.secretData ? stringify([this.args.secret.secretData], {}) : '{ "": "" }';
+ return this.args.secret?.secretData ? stringify([this.args.secret.secretData], {}) : this.startingValue;
}
@action
diff --git a/ui/lib/kv/addon/components/page/secret/paths.hbs b/ui/lib/kv/addon/components/page/secret/paths.hbs
index 2016674cc792..fcdc340a5d06 100644
--- a/ui/lib/kv/addon/components/page/secret/paths.hbs
+++ b/ui/lib/kv/addon/components/page/secret/paths.hbs
@@ -25,9 +25,7 @@
@text="Copy"
@isIconOnly={{true}}
@textToCopy={{path.snippet}}
- @onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
- )}}
+ @onError={{(fn (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger"))}}
data-test-copy-button={{path.snippet}}
class="transparent"
/>
diff --git a/ui/lib/replication/addon/templates/mode/secondaries/add.hbs b/ui/lib/replication/addon/templates/mode/secondaries/add.hbs
index 9fc54ccb7832..e9f50914ac53 100644
--- a/ui/lib/replication/addon/templates/mode/secondaries/add.hbs
+++ b/ui/lib/replication/addon/templates/mode/secondaries/add.hbs
@@ -90,7 +90,7 @@
class="primary"
@container=".hds-modal"
@onError={{(fn
- (set-flash-message "Clipboard copy failed. Please make sure the browser Clipboard API is allowed." "danger")
+ (set-flash-message "Clipboard copy failed. The Clipboard API requires a secure context." "danger")
)}}
{{on "click" (action "onCopy")}}
/>
diff --git a/ui/package.json b/ui/package.json
index 1036dd0a550d..7aa9f76cdedf 100644
--- a/ui/package.json
+++ b/ui/package.json
@@ -170,7 +170,7 @@
"escape-string-regexp": "^2.0.0",
"eslint": "^8.37.0",
"eslint-config-prettier": "^8.8.0",
- "eslint-plugin-compat": "4.0.2",
+ "eslint-plugin-compat": "^4.2.0",
"eslint-plugin-ember": "^11.5.0",
"eslint-plugin-n": "^15.7.0",
"eslint-plugin-prettier": "^4.2.1",
diff --git a/ui/tests/acceptance/enterprise-kmip-test.js b/ui/tests/acceptance/enterprise-kmip-test.js
index bf731c560ae4..6eda7ff9f4d8 100644
--- a/ui/tests/acceptance/enterprise-kmip-test.js
+++ b/ui/tests/acceptance/enterprise-kmip-test.js
@@ -15,11 +15,8 @@ import mountSecrets from 'vault/tests/pages/settings/mount-secret-backend';
import { allEngines } from 'vault/helpers/mountable-secret-engines';
import { runCmd } from 'vault/tests/helpers/commands';
-const getRandomPort = () => {
- let a = Math.floor(100000 + Math.random() * 900000);
- a = String(a);
- return a.substring(0, 4);
-};
+// port has a lower limit of 1024
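+// the expression below yields an integer in [1024, 6023]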
+const getRandomPort = () => Math.floor(Math.random() * 5000 + 1024);
const mount = async (shouldConfig = true) => {
const now = Date.now();
diff --git a/ui/tests/acceptance/enterprise-replication-modes-test.js b/ui/tests/acceptance/enterprise-replication-modes-test.js
index c06c7181acd8..4861f87f7fe1 100644
--- a/ui/tests/acceptance/enterprise-replication-modes-test.js
+++ b/ui/tests/acceptance/enterprise-replication-modes-test.js
@@ -34,11 +34,13 @@ module('Acceptance | Enterprise | replication modes', function (hooks) {
});
test('replication page when unsupported', async function (assert) {
- await this.setupMocks({
+ this.server.get('sys/replication/status', () => ({
data: {
mode: 'unsupported',
},
- });
+ }));
+
+ await authPage.login();
await visit('/vault/replication');
assert.dom(s.title).hasText('Replication unsupported', 'it shows the unsupported view');
diff --git a/ui/tests/acceptance/secrets/backend/generic/secret-test.js b/ui/tests/acceptance/secrets/backend/generic/secret-test.js
index ec6a7cdbf300..2a00f61c12f7 100644
--- a/ui/tests/acceptance/secrets/backend/generic/secret-test.js
+++ b/ui/tests/acceptance/secrets/backend/generic/secret-test.js
@@ -3,7 +3,8 @@
* SPDX-License-Identifier: BUSL-1.1
*/
-import { currentRouteName, visit } from '@ember/test-helpers';
+import { click, currentRouteName, settled, visit } from '@ember/test-helpers';
+import { selectChoose } from 'ember-power-select/test-support';
import { module, test } from 'qunit';
import { setupApplicationTest } from 'ember-qunit';
import { v4 as uuidv4 } from 'uuid';
@@ -19,6 +20,7 @@ import { PAGE } from 'vault/tests/helpers/kv/kv-selectors';
import { create } from 'ember-cli-page-object';
import apiStub from 'vault/tests/helpers/noop-all-api-requests';
+import { deleteEngineCmd, runCmd } from 'vault/tests/helpers/commands';
const cli = create(consolePanel);
@@ -65,7 +67,15 @@ module('Acceptance | secrets/generic/create', function (hooks) {
// upgrade to version 2 generic mount
`write sys/mounts/${path}/tune options=version=2`,
]);
- await visit(`/vault/secrets/${path}/kv/list`);
+ await visit('/vault/secrets');
+ await selectChoose('[data-test-component="search-select"]#filter-by-engine-name', path);
+ await settled();
+ await click(`[data-test-secrets-backend-link="${path}"]`);
+ assert.strictEqual(
+ currentRouteName(),
+ 'vault.cluster.secrets.backend.kv.list',
+ 'navigates to the KV engine list page'
+ );
assert
.dom(PAGE.list.item('foo'))
@@ -78,5 +88,15 @@ module('Acceptance | secrets/generic/create', function (hooks) {
assert.dom(PAGE.list.item(secret.path)).exists('lists both records');
});
assert.dom(PAGE.list.item()).exists({ count: 2 }, 'lists only the two secrets');
+
+ await visit(`/vault/secrets/${path}/list`);
+ assert.strictEqual(
+ currentRouteName(),
+ 'vault.cluster.secrets.backend.kv.list',
+ 'redirects to the KV engine list page from generic list'
+ );
+
+ // Clean up
+ await runCmd(deleteEngineCmd(path));
});
});
diff --git a/ui/tests/acceptance/secrets/backend/kubernetes/roles-test.js b/ui/tests/acceptance/secrets/backend/kubernetes/roles-test.js
index 4a8dcdbdf6fa..ce3690e381ea 100644
--- a/ui/tests/acceptance/secrets/backend/kubernetes/roles-test.js
+++ b/ui/tests/acceptance/secrets/backend/kubernetes/roles-test.js
@@ -10,6 +10,7 @@ import kubernetesScenario from 'vault/mirage/scenarios/kubernetes';
import kubernetesHandlers from 'vault/mirage/handlers/kubernetes';
import authPage from 'vault/tests/pages/auth';
import { fillIn, visit, currentURL, click, currentRouteName } from '@ember/test-helpers';
+import { SELECTORS as GENERAL } from 'vault/tests/helpers/general-selectors';
module('Acceptance | kubernetes | roles', function (hooks) {
setupApplicationTest(hooks);
@@ -30,7 +31,8 @@ module('Acceptance | kubernetes | roles', function (hooks) {
test('it should filter roles', async function (assert) {
await this.visitRoles();
assert.dom('[data-test-list-item-link]').exists({ count: 3 }, 'Roles list renders');
- await fillIn('[data-test-component="navigate-input"]', '1');
+ await fillIn(GENERAL.filterInputExplicit, '1');
+ await click(GENERAL.filterInputExplicitSearch);
assert.dom('[data-test-list-item-link]').exists({ count: 1 }, 'Filtered roles list renders');
assert.ok(currentURL().includes('pageFilter=1'), 'pageFilter query param value is set');
});
diff --git a/ui/tests/acceptance/secrets/backend/kv/kv-v2-workflow-edge-cases-test.js b/ui/tests/acceptance/secrets/backend/kv/kv-v2-workflow-edge-cases-test.js
index bf94b8018838..86afc99b6c77 100644
--- a/ui/tests/acceptance/secrets/backend/kv/kv-v2-workflow-edge-cases-test.js
+++ b/ui/tests/acceptance/secrets/backend/kv/kv-v2-workflow-edge-cases-test.js
@@ -2,7 +2,7 @@
* Copyright (c) HashiCorp, Inc.
* SPDX-License-Identifier: BUSL-1.1
*/
-
+/* eslint-disable no-useless-escape */
import { module, test } from 'qunit';
import { v4 as uuidv4 } from 'uuid';
import { click, currentURL, fillIn, findAll, setupOnerror, typeIn, visit } from '@ember/test-helpers';
@@ -281,7 +281,13 @@ module('Acceptance | kv-v2 workflow | edge cases', function (hooks) {
await fillIn(FORM.inputByAttr('path'), 'complex');
await click(FORM.toggleJson);
- assert.strictEqual(codemirror().getValue(), '{ "": "" }');
+
+ assert.strictEqual(
+ codemirror().getValue(),
+ `{
+ \"\": \"\"
+}`
+ );
codemirror().setValue('{ "foo3": { "name": "bar3" } }');
await click(FORM.saveBtn);
diff --git a/ui/tests/helpers/clients.js b/ui/tests/helpers/clients.js
index 467ef57603e9..a2bf63cabe5f 100644
--- a/ui/tests/helpers/clients.js
+++ b/ui/tests/helpers/clients.js
@@ -120,7 +120,7 @@ export function overrideResponse(httpStatus, data) {
if (httpStatus === 204) {
return new Response(204, { 'Content-Type': 'application/json' });
}
- return new Response(200, { 'Content-Type': 'application/json' }, JSON.stringify(data));
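+ // previously this always returned 200, so non-2xx overrides never exercised the error handling under test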
+ return new Response(httpStatus, { 'Content-Type': 'application/json' }, JSON.stringify(data));
}
export async function dateDropdownSelect(month, year) {
diff --git a/ui/tests/helpers/general-selectors.js b/ui/tests/helpers/general-selectors.js
index ea736f61e08f..badce0204632 100644
--- a/ui/tests/helpers/general-selectors.js
+++ b/ui/tests/helpers/general-selectors.js
@@ -16,6 +16,8 @@ export const SELECTORS = {
tab: (name) => `[data-test-tab="${name}"]`,
filter: (name) => `[data-test-filter="${name}"]`,
filterInput: '[data-test-filter-input]',
+ filterInputExplicit: '[data-test-filter-input-explicit]',
+ filterInputExplicitSearch: '[data-test-filter-input-explicit-search]',
confirmModalInput: '[data-test-confirmation-modal-input]',
confirmButton: '[data-test-confirm-button]',
confirmTrigger: '[data-test-confirm-action-trigger]',
diff --git a/ui/tests/integration/components/choose-pgp-key-form-test.js b/ui/tests/integration/components/choose-pgp-key-form-test.js
index 4e87e3e39526..23838386ae08 100644
--- a/ui/tests/integration/components/choose-pgp-key-form-test.js
+++ b/ui/tests/integration/components/choose-pgp-key-form-test.js
@@ -9,6 +9,17 @@ import { setupRenderingTest } from 'vault/tests/helpers';
import { click, fillIn, render } from '@ember/test-helpers';
import { hbs } from 'ember-cli-htmlbars';
+const CHOOSE_PGP = {
+ begin: '[data-test-choose-pgp-key-form="begin"]',
+ description: '[data-test-choose-pgp-key-description]',
+ toggle: '[data-test-text-toggle]',
+ useKeyButton: '[data-test-use-pgp-key-button]',
+ pgpTextArea: '[data-test-pgp-file-textarea]',
+ confirm: '[data-test-pgp-key-confirm]',
+ base64Output: '[data-test-pgp-key-copy]',
+ submit: '[data-test-confirm-pgp-key-submit]',
+ cancel: '[data-test-use-pgp-key-cancel]',
+};
module('Integration | Component | choose-pgp-key-form', function (hooks) {
setupRenderingTest(hooks);
@@ -22,25 +33,24 @@ module('Integration | Component | choose-pgp-key-form', function (hooks) {
hbs``
);
- assert.dom('[data-test-choose-pgp-key-form="begin"]').exists('PGP key selection form exists');
+ assert.dom(CHOOSE_PGP.begin).exists('PGP key selection form exists');
+ assert.dom(CHOOSE_PGP.description).hasText('my custom form text', 'uses custom form text');
+ await click(CHOOSE_PGP.toggle);
+ assert.dom(CHOOSE_PGP.useKeyButton).isDisabled('use pgp button is disabled');
+ await fillIn(CHOOSE_PGP.pgpTextArea, 'base64-pgp-key');
+ assert.dom(CHOOSE_PGP.useKeyButton).isNotDisabled('use pgp button is no longer disabled');
+ await click(CHOOSE_PGP.useKeyButton);
assert
- .dom('[data-test-choose-pgp-key-description]')
- .hasText('my custom form text', 'uses custom form text');
- await click('[data-test-text-toggle]');
- assert.dom('[data-test-use-pgp-key-button]').isDisabled('use pgp button is disabled');
- await fillIn('[data-test-pgp-file-textarea]', 'base64-pgp-key');
- assert.dom('[data-test-use-pgp-key-button]').isNotDisabled('use pgp button is no longer disabled');
- await click('[data-test-use-pgp-key-button]');
- assert
- .dom('[data-test-pgp-key-confirm]')
+ .dom(CHOOSE_PGP.confirm)
.hasText(
'Below is the base-64 encoded PGP Key that will be used. Click the "Do it" button to proceed.',
'Incorporates button text in confirmation'
);
- assert.dom('[data-test-pgp-key-copy]').hasText('base64-pgp-key', 'Shows PGP key contents');
- assert.dom('[data-test-confirm-pgp-key-submit]').hasText('Do it', 'uses passed buttonText');
- await click('[data-test-confirm-pgp-key-submit]');
+ assert.dom(CHOOSE_PGP.base64Output).hasText('base64-pgp-key', 'Shows PGP key contents');
+ assert.dom(CHOOSE_PGP.submit).hasText('Do it', 'uses passed buttonText');
+ await click(CHOOSE_PGP.submit);
});
+
test('it calls onSubmit correctly', async function (assert) {
const submitSpy = sinon.spy();
this.set('onSubmit', submitSpy);
@@ -48,24 +58,24 @@ module('Integration | Component | choose-pgp-key-form', function (hooks) {
hbs``
);
- assert.dom('[data-test-choose-pgp-key-form="begin"]').exists('PGP key selection form exists');
+ assert.dom(CHOOSE_PGP.begin).exists('PGP key selection form exists');
assert
- .dom('[data-test-choose-pgp-key-description]')
+ .dom(CHOOSE_PGP.description)
.hasText('Choose a PGP Key from your computer or paste the contents of one in the form below.');
- await click('[data-test-text-toggle]');
- assert.dom('[data-test-use-pgp-key-button]').isDisabled('use pgp button is disabled');
- await fillIn('[data-test-pgp-file-textarea]', 'base64-pgp-key');
- assert.dom('[data-test-use-pgp-key-button]').isNotDisabled('use pgp button is no longer disabled');
- await click('[data-test-use-pgp-key-button]');
+ await click(CHOOSE_PGP.toggle);
+ assert.dom(CHOOSE_PGP.useKeyButton).isDisabled('use pgp button is disabled');
+ await fillIn(CHOOSE_PGP.pgpTextArea, 'base64-pgp-key');
+ assert.dom(CHOOSE_PGP.useKeyButton).isNotDisabled('use pgp button is no longer disabled');
+ await click(CHOOSE_PGP.useKeyButton);
assert
- .dom('[data-test-pgp-key-confirm]')
+ .dom(CHOOSE_PGP.confirm)
.hasText(
'Below is the base-64 encoded PGP Key that will be used. Click the "Submit" button to proceed.',
'Confirmation text has buttonText'
);
- assert.dom('[data-test-pgp-key-copy]').hasText('base64-pgp-key', 'Shows PGP key contents');
- assert.dom('[data-test-confirm-pgp-key-submit]').hasText('Submit', 'uses passed buttonText');
- await click('[data-test-confirm-pgp-key-submit]');
+ assert.dom(CHOOSE_PGP.base64Output).hasText('base64-pgp-key', 'Shows PGP key contents');
+ assert.dom(CHOOSE_PGP.submit).hasText('Submit', 'uses passed buttonText');
+ await click(CHOOSE_PGP.submit);
assert.ok(submitSpy.calledOnceWith('base64-pgp-key'));
});
@@ -76,9 +86,9 @@ module('Integration | Component | choose-pgp-key-form', function (hooks) {
hbs``
);
- await click('[data-test-text-toggle]');
- await fillIn('[data-test-pgp-file-textarea]', 'base64-pgp-key');
- await click('[data-test-use-pgp-key-cancel]');
+ await click(CHOOSE_PGP.toggle);
+ await fillIn(CHOOSE_PGP.pgpTextArea, 'base64-pgp-key');
+ await click(CHOOSE_PGP.cancel);
assert.ok(cancelSpy.calledOnce);
});
});
diff --git a/ui/tests/integration/components/filter-input-explicit-test.js b/ui/tests/integration/components/filter-input-explicit-test.js
new file mode 100644
index 000000000000..5e6e90941f88
--- /dev/null
+++ b/ui/tests/integration/components/filter-input-explicit-test.js
@@ -0,0 +1,61 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: BUSL-1.1
+ */
+
+import { module, test } from 'qunit';
+import { setupRenderingTest } from 'ember-qunit';
+import { render, typeIn, click } from '@ember/test-helpers';
+import hbs from 'htmlbars-inline-precompile';
+import { SELECTORS as GENERAL } from 'vault/tests/helpers/general-selectors';
+import sinon from 'sinon';
+
+const handler = (e) => {
+ // needed because filter-input-explicit calls handleSearch on form submit, so the default submit must be prevented
+ if (e && e.preventDefault) e.preventDefault();
+ return;
+};
+
+module('Integration | Component | filter-input-explicit', function (hooks) {
+ setupRenderingTest(hooks);
+
+ hooks.beforeEach(function () {
+ this.handleSearch = sinon.spy(handler);
+ this.handleInput = sinon.spy();
+ this.handleKeyDown = sinon.spy();
+ this.query = '';
+ this.placeholder = 'Filter roles';
+
+ this.renderComponent = () => {
+ return render(
+ hbs``
+ );
+ };
+ });
+
+ test('it renders', async function (assert) {
+ this.query = 'foo';
+ await this.renderComponent();
+
+ assert
+ .dom(GENERAL.filterInputExplicit)
+ .hasAttribute('placeholder', 'Filter roles', 'Placeholder passed to input element');
+ assert.dom(GENERAL.filterInputExplicit).hasValue('foo', 'Value passed to input element');
+ });
+
+ test('it should call handleSearch on submit', async function (assert) {
+ await this.renderComponent();
+ await typeIn(GENERAL.filterInputExplicit, 'bar');
+ await click(GENERAL.filterInputExplicitSearch);
+ assert.ok(this.handleSearch.calledOnce, 'handleSearch was called once');
+ });
+
+ test('it should send keydown event on keydown', async function (assert) {
+ await this.renderComponent();
+ await typeIn(GENERAL.filterInputExplicit, 'a');
+ await typeIn(GENERAL.filterInputExplicit, 'b');
+
+ assert.ok(this.handleKeyDown.calledTwice, 'handle keydown was called twice');
+ assert.ok(this.handleSearch.notCalled, 'handleSearch was not called on a keydown event');
+ });
+});
diff --git a/ui/tests/integration/components/kubernetes/page/roles-test.js b/ui/tests/integration/components/kubernetes/page/roles-test.js
index 67e9a4b4ca95..afb7d8543762 100644
--- a/ui/tests/integration/components/kubernetes/page/roles-test.js
+++ b/ui/tests/integration/components/kubernetes/page/roles-test.js
@@ -10,6 +10,7 @@ import { setupMirage } from 'ember-cli-mirage/test-support';
import { render, click } from '@ember/test-helpers';
import hbs from 'htmlbars-inline-precompile';
import { allowAllCapabilitiesStub } from 'vault/tests/helpers/stubs';
+import { SELECTORS as GENERAL } from 'vault/tests/helpers/general-selectors';
module('Integration | Component | kubernetes | Page::Roles', function (hooks) {
setupRenderingTest(hooks);
@@ -58,7 +59,7 @@ module('Integration | Component | kubernetes | Page::Roles', function (hooks) {
.dom('[data-test-toolbar-roles-action]')
.doesNotExist('Create role', 'Toolbar action does not render when not configured');
assert
- .dom('[data-test-nav-input]')
+ .dom(GENERAL.filterInputExplicit)
.doesNotExist('Roles filter input does not render when not configured');
assert.dom('[data-test-config-cta]').exists('Config cta renders');
});
@@ -70,7 +71,7 @@ module('Integration | Component | kubernetes | Page::Roles', function (hooks) {
assert
.dom('[data-test-toolbar-roles-action] svg')
.hasClass('flight-icon-plus', 'Toolbar action has correct icon');
- assert.dom('[data-test-nav-input]').exists('Roles filter input renders');
+ assert.dom(GENERAL.filterInputExplicit).exists('Roles filter input renders');
assert.dom('[data-test-empty-state-title]').hasText('No roles yet', 'Title renders');
assert
.dom('[data-test-empty-state-message]')
diff --git a/ui/tests/integration/components/kubernetes/tab-page-header-test.js b/ui/tests/integration/components/kubernetes/tab-page-header-test.js
index fe658242a4c9..9021573b71ee 100644
--- a/ui/tests/integration/components/kubernetes/tab-page-header-test.js
+++ b/ui/tests/integration/components/kubernetes/tab-page-header-test.js
@@ -9,6 +9,8 @@ import { setupEngine } from 'ember-engines/test-support';
import { setupMirage } from 'ember-cli-mirage/test-support';
import { render } from '@ember/test-helpers';
import hbs from 'htmlbars-inline-precompile';
+import { SELECTORS as GENERAL } from 'vault/tests/helpers/general-selectors';
+import sinon from 'sinon';
module('Integration | Component | kubernetes | TabPageHeader', function (hooks) {
setupRenderingTest(hooks);
@@ -28,12 +30,18 @@ module('Integration | Component | kubernetes | TabPageHeader', function (hooks)
this.model = this.store.peekRecord('secret-engine', 'kubernetes-test');
this.mount = this.model.path.slice(0, -1);
this.breadcrumbs = [{ label: 'secrets', route: 'secrets', linkExternal: true }, { label: this.mount }];
+ this.handleSearch = sinon.spy();
+ this.handleInput = sinon.spy();
+ this.handleKeyDown = sinon.spy();
});
test('it should render breadcrumbs', async function (assert) {
- await render(hbs``, {
- owner: this.engine,
- });
+ await render(
+ hbs``,
+ {
+ owner: this.engine,
+ }
+ );
assert.dom('[data-test-breadcrumbs] li:nth-child(1) a').hasText('secrets', 'Secrets breadcrumb renders');
assert
@@ -42,9 +50,12 @@ module('Integration | Component | kubernetes | TabPageHeader', function (hooks)
});
test('it should render title', async function (assert) {
- await render(hbs``, {
- owner: this.engine,
- });
+ await render(
+ hbs``,
+ {
+ owner: this.engine,
+ }
+ );
assert
.dom('[data-test-header-title] svg')
.hasClass('flight-icon-kubernetes-color', 'Correct icon renders in title');
@@ -52,9 +63,12 @@ module('Integration | Component | kubernetes | TabPageHeader', function (hooks)
});
test('it should render tabs', async function (assert) {
- await render(hbs``, {
- owner: this.engine,
- });
+ await render(
+ hbs``,
+ {
+ owner: this.engine,
+ }
+ );
assert.dom('[data-test-tab="overview"]').hasText('Overview', 'Overview tab renders');
assert.dom('[data-test-tab="roles"]').hasText('Roles', 'Roles tab renders');
assert.dom('[data-test-tab="config"]').hasText('Configuration', 'Configuration tab renders');
@@ -62,16 +76,16 @@ module('Integration | Component | kubernetes | TabPageHeader', function (hooks)
test('it should render filter for roles', async function (assert) {
await render(
- hbs``,
+ hbs``,
{ owner: this.engine }
);
- assert.dom('[data-test-nav-input] input').hasValue('test', 'Filter renders with provided value');
+ assert.dom(GENERAL.filterInputExplicit).hasValue('test', 'Filter renders with provided value');
});
test('it should yield block for toolbar actions', async function (assert) {
await render(
hbs`
-
+
It yields!
`,
diff --git a/ui/tests/integration/components/kv/kv-data-fields-test.js b/ui/tests/integration/components/kv/kv-data-fields-test.js
index 0cd83dfb0c1b..16f8a3df2739 100644
--- a/ui/tests/integration/components/kv/kv-data-fields-test.js
+++ b/ui/tests/integration/components/kv/kv-data-fields-test.js
@@ -43,8 +43,8 @@ module('Integration | Component | kv-v2 | KvDataFields', function (hooks) {
await render(hbs``, { owner: this.engine });
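+ // getValue(' ') joins the editor's lines with spaces so the pretty-printed default can be compared on one line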
assert.strictEqual(
codemirror().getValue(' '),
- `{ \"\": \"\" }`, // eslint-disable-line no-useless-escape
- 'json editor initializes with empty object'
+ `{ \"\": \"\" }`, // eslint-disable-line no-useless-escape
+ 'json editor initializes with empty object that includes whitespace'
);
await fillIn(`${FORM.jsonEditor} textarea`, 'blah');
assert.strictEqual(codemirror().state.lint.marked.length, 1, 'codemirror lints input');
diff --git a/ui/tests/integration/components/shamir/dr-token-flow-test.js b/ui/tests/integration/components/shamir/dr-token-flow-test.js
index 2cbb76bf25e9..6eb6fcc82c12 100644
--- a/ui/tests/integration/components/shamir/dr-token-flow-test.js
+++ b/ui/tests/integration/components/shamir/dr-token-flow-test.js
@@ -6,9 +6,11 @@
import sinon from 'sinon';
import { module, test } from 'qunit';
import { setupRenderingTest } from 'vault/tests/helpers';
-import { click, fillIn, render } from '@ember/test-helpers';
+import { click, fillIn, render, waitFor } from '@ember/test-helpers';
import { hbs } from 'ember-cli-htmlbars';
import { setupMirage } from 'ember-cli-mirage/test-support';
+import { overrideResponse } from 'vault/tests/helpers/clients';
+import { SELECTORS as GENERAL } from 'vault/tests/helpers/general-selectors';
module('Integration | Component | shamir/dr-token-flow', function (hooks) {
setupRenderingTest(hooks);
@@ -151,6 +153,25 @@ module('Integration | Component | shamir/dr-token-flow', function (hooks) {
assert.dom('[data-test-dr-token-flow-step="shamir"]').exists('Renders shamir step after PGP key chosen');
});
+ test('it shows error with pgp key', async function (assert) {
+ assert.expect(2);
+ this.server.get('/sys/replication/dr/secondary/generate-operation-token/attempt', function () {
+ return {};
+ });
+ this.server.post('/sys/replication/dr/secondary/generate-operation-token/attempt', () =>
+ overrideResponse(400, { errors: ['error parsing PGP key'] })
+ );
+ await render(hbs``);
+ await click('[data-test-use-pgp-key-cta]');
+ assert.dom('[data-test-choose-pgp-key-form="begin"]').exists('PGP form shows');
+ await click('[data-test-text-toggle]');
+ await fillIn('[data-test-pgp-file-textarea]', 'some-key-here');
+ await click('[data-test-use-pgp-key-button]');
+ await click('[data-test-confirm-pgp-key-submit]');
+ await waitFor(GENERAL.messageError);
+ assert.dom(GENERAL.messageError).hasText('Error error parsing PGP key');
+ });
+
test('it cancels correctly when generation not started', async function (assert) {
assert.expect(2);
const cancelSpy = sinon.spy();
diff --git a/ui/tests/unit/utils/advanced-secret-test.js b/ui/tests/unit/utils/advanced-secret-test.js
index 1a58b38e68e7..d2065dbe732d 100644
--- a/ui/tests/unit/utils/advanced-secret-test.js
+++ b/ui/tests/unit/utils/advanced-secret-test.js
@@ -108,7 +108,40 @@ module('Unit | Utility | advanced-secret', function () {
},
].forEach((test) => {
const result = obfuscateData(test.data);
- assert.deepEqual(result, test.obscured, `obfuscates values of ${test.name}`);
+ assert.deepEqual(result, test.obscured, `obfuscates object values of ${test.name}`);
+ });
+ });
+
+ test('it obfuscates null values', function (assert) {
+ assert.expect(2);
+ [
+ {
+ name: 'null value',
+ data: {
+ one: 'fish',
+ two: 'fish',
+ three: 'fish',
+ blue: null,
+ },
+ obscured: {
+ blue: '********',
+ one: '********',
+ three: '********',
+ two: '********',
+ },
+ },
+ {
+ name: 'null value nested-object',
+ data: {
+ one: { two: null },
+ },
+ obscured: {
+ one: { two: '********' },
+ },
+ },
+ ].forEach((test) => {
+ const result = obfuscateData(test.data);
+ assert.deepEqual(result, test.obscured, `obfuscates null values of ${test.name}`);
});
});
diff --git a/ui/yarn.lock b/ui/yarn.lock
index 940f4a958c5b..ebfe77c906c1 100644
--- a/ui/yarn.lock
+++ b/ui/yarn.lock
@@ -2372,17 +2372,10 @@ __metadata:
languageName: node
linkType: hard
-"@mdn/browser-compat-data@npm:^3.3.14":
- version: 3.3.14
- resolution: "@mdn/browser-compat-data@npm:3.3.14"
- checksum: 0363cc9cb3cef308b78b3ba82af0542ad928bacecc95ee1e849c134c3eb866f4771df9fb064fd4da42fdb28e2d6932e0abb6e47ed47fbc42a26a9b0a5aee1e9f
- languageName: node
- linkType: hard
-
-"@mdn/browser-compat-data@npm:^4.1.5":
- version: 4.2.1
- resolution: "@mdn/browser-compat-data@npm:4.2.1"
- checksum: 76eaa7dafed154040e769ba6d23f2dcb58e805ed3ccb376a5c4b76326c92643753c20194faed363870800dc3c1af26c107b8562710c8bb37aaee8c5ffe2a89cd
+"@mdn/browser-compat-data@npm:^5.2.34, @mdn/browser-compat-data@npm:^5.3.13":
+ version: 5.5.28
+ resolution: "@mdn/browser-compat-data@npm:5.5.28"
+ checksum: e82fce4e8cef03b691559ac13fb4d982341c60b638d6a5a7e298eae139e9f1b258843c9c7f299d23f4d8aa4b8d9df6c3075987c5fb2a96fac2fc7b54b0add669
languageName: node
linkType: hard
@@ -4393,12 +4386,12 @@ __metadata:
languageName: node
linkType: hard
-"ast-metadata-inferer@npm:^0.7.0":
- version: 0.7.0
- resolution: "ast-metadata-inferer@npm:0.7.0"
+"ast-metadata-inferer@npm:^0.8.0":
+ version: 0.8.0
+ resolution: "ast-metadata-inferer@npm:0.8.0"
dependencies:
- "@mdn/browser-compat-data": ^3.3.14
- checksum: 9bb633b680d537cd6d5066e48ed6ae68d7ddae663a380bdba78d4f40b53d4c03845dc3a7307d55ecf1248b447ee28d62bb20ade8b880abc2df629cda9414e32e
+ "@mdn/browser-compat-data": ^5.2.34
+ checksum: 8b9f38b5c7d33e2fad80174bb2613fad962c6ef728175281dd7957548608c95d958190b5269b74f6e24d037f6e650b45eb39440c1e206e3f9799aedde27fa54a
languageName: node
linkType: hard
@@ -6595,7 +6588,7 @@ __metadata:
languageName: node
linkType: hard
-"browserslist@npm:^4.0.0, browserslist@npm:^4.14.5, browserslist@npm:^4.16.8, browserslist@npm:^4.22.2, browserslist@npm:^4.23.0":
+"browserslist@npm:^4.0.0, browserslist@npm:^4.14.5, browserslist@npm:^4.21.10, browserslist@npm:^4.22.2, browserslist@npm:^4.23.0":
version: 4.23.0
resolution: "browserslist@npm:4.23.0"
dependencies:
@@ -6842,13 +6835,20 @@ __metadata:
languageName: node
linkType: hard
-"caniuse-lite@npm:^1.0.0, caniuse-lite@npm:^1.0.30000844, caniuse-lite@npm:^1.0.30001304, caniuse-lite@npm:^1.0.30001587":
+"caniuse-lite@npm:^1.0.0, caniuse-lite@npm:^1.0.30000844, caniuse-lite@npm:^1.0.30001587":
version: 1.0.30001608
resolution: "caniuse-lite@npm:1.0.30001608"
checksum: 7ae62689ca358cd3bdb89b2db9b4841812299f8a0b3ab94b52e4548778bd5740814617c0e0b2504b6bfaf47acc2472e1730393bd2027d646acbe8dc8206ad9e7
languageName: node
linkType: hard
+"caniuse-lite@npm:^1.0.30001524":
+ version: 1.0.30001620
+ resolution: "caniuse-lite@npm:1.0.30001620"
+ checksum: 1831e519c29ce6971bc50d56bab196a307fcb4181e7deaa80df314b035b87b3912b8626b4e87adc301d0bfe6a90b99814101b1cb28114b96e720f996f19bdc0d
+ languageName: node
+ linkType: hard
+
"capture-exit@npm:^2.0.0":
version: 2.0.0
resolution: "capture-exit@npm:2.0.0"
@@ -7638,7 +7638,7 @@ __metadata:
languageName: node
linkType: hard
-"core-js@npm:^3.16.2, core-js@npm:^3.24.1":
+"core-js@npm:^3.24.1":
version: 3.36.1
resolution: "core-js@npm:3.36.1"
checksum: 6f6c152179bd0673da34e67a82c6a5c37f31f9fbe908e9caf93749dc62a25b6e07fbff2411de3b74bb2d0661b7f9fb247115ba8efabf9904f5fef26edead515e
@@ -10694,21 +10694,20 @@ __metadata:
languageName: node
linkType: hard
-"eslint-plugin-compat@npm:4.0.2":
- version: 4.0.2
- resolution: "eslint-plugin-compat@npm:4.0.2"
+"eslint-plugin-compat@npm:^4.2.0":
+ version: 4.2.0
+ resolution: "eslint-plugin-compat@npm:4.2.0"
dependencies:
- "@mdn/browser-compat-data": ^4.1.5
- ast-metadata-inferer: ^0.7.0
- browserslist: ^4.16.8
- caniuse-lite: ^1.0.30001304
- core-js: ^3.16.2
+ "@mdn/browser-compat-data": ^5.3.13
+ ast-metadata-inferer: ^0.8.0
+ browserslist: ^4.21.10
+ caniuse-lite: ^1.0.30001524
find-up: ^5.0.0
- lodash.memoize: 4.1.2
- semver: 7.3.5
+ lodash.memoize: ^4.1.2
+ semver: ^7.5.4
peerDependencies:
eslint: ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0
- checksum: 2a1c1ebfd2d9a0e94064417f0f582b183d95bad08014e1f39dd1b4b599c1e909872ea344e016ec8fb297fcf8ab5d0c0b10032f1c2d6d4f9fb57a8a9fad67130e
+ checksum: 68c1f7f6cd1e6fa663568ba1d5c0cef9e42b1e3ec4e9b63a98a2bce18f39711a2313c47ba576a6583e7d92edc7beddc83a583dac8c12ac80c642741fee37e67d
languageName: node
linkType: hard
@@ -14570,7 +14569,7 @@ __metadata:
languageName: node
linkType: hard
-"lodash.memoize@npm:4.1.2, lodash.memoize@npm:^4.1.2":
+"lodash.memoize@npm:^4.1.2":
version: 4.1.2
resolution: "lodash.memoize@npm:4.1.2"
checksum: 9ff3942feeccffa4f1fafa88d32f0d24fdc62fd15ded5a74a5f950ff5f0c6f61916157246744c620173dddf38d37095a92327d5fd3861e2063e736a5c207d089
@@ -18440,17 +18439,6 @@ __metadata:
languageName: node
linkType: hard
-"semver@npm:7.3.5":
- version: 7.3.5
- resolution: "semver@npm:7.3.5"
- dependencies:
- lru-cache: ^6.0.0
- bin:
- semver: bin/semver.js
- checksum: 5eafe6102bea2a7439897c1856362e31cc348ccf96efd455c8b5bc2c61e6f7e7b8250dc26b8828c1d76a56f818a7ee907a36ae9fb37a599d3d24609207001d60
- languageName: node
- linkType: hard
-
"semver@npm:^6.0.0, semver@npm:^6.3.0, semver@npm:^6.3.1":
version: 6.3.1
resolution: "semver@npm:6.3.1"
@@ -18471,6 +18459,15 @@ __metadata:
languageName: node
linkType: hard
+"semver@npm:^7.5.4":
+ version: 7.6.2
+ resolution: "semver@npm:7.6.2"
+ bin:
+ semver: bin/semver.js
+ checksum: 40f6a95101e8d854357a644da1b8dd9d93ce786d5c6a77227bc69dbb17bea83d0d1d1d7c4cd5920a6df909f48e8bd8a5909869535007f90278289f2451d0292d
+ languageName: node
+ linkType: hard
+
"send@npm:0.18.0":
version: 0.18.0
resolution: "send@npm:0.18.0"
@@ -20862,7 +20859,7 @@ __metadata:
escape-string-regexp: ^2.0.0
eslint: ^8.37.0
eslint-config-prettier: ^8.8.0
- eslint-plugin-compat: 4.0.2
+ eslint-plugin-compat: ^4.2.0
eslint-plugin-ember: ^11.5.0
eslint-plugin-n: ^15.7.0
eslint-plugin-prettier: ^4.2.1
diff --git a/vault/activity_log.go b/vault/activity_log.go
index e4836c4e30ea..5fc6d1fed0dc 100644
--- a/vault/activity_log.go
+++ b/vault/activity_log.go
@@ -12,7 +12,6 @@ import (
"io"
"net/http"
"os"
- "path"
"sort"
"strconv"
"strings"
@@ -267,11 +266,20 @@ func NewActivityLog(core *Core, logger log.Logger, view *BarrierView, metrics me
precomputedQueryWritten: make(chan struct{}),
}
- config, err := a.loadConfigOrDefault(core.activeContext, core.ManualLicenseReportingEnabled())
+ config, err := a.loadConfigOrDefault(core.activeContext)
if err != nil {
return nil, err
}
+	// check whether the retention time in storage is less than the default when reporting is enabled, to support upgrades
+ if (config.RetentionMonths < ActivityLogMinimumRetentionMonths) && core.ManualLicenseReportingEnabled() {
+ updatedConfig, err := a.setDefaultRetentionMonthsInConfig(core.activeContext, config)
+ if err != nil {
+ return nil, err
+ }
+ config = updatedConfig
+ }
+
a.SetConfigInit(config)
a.queryStore = activity.NewPrecomputedQueryStore(
@@ -1902,7 +1910,7 @@ func defaultActivityConfig() activityConfig {
}
}
-func (a *ActivityLog) loadConfigOrDefault(ctx context.Context, isReportingEnabled bool) (activityConfig, error) {
+func (a *ActivityLog) loadConfigOrDefault(ctx context.Context) (activityConfig, error) {
// Load from storage
var config activityConfig
configRaw, err := a.view.Get(ctx, activityConfigKey)
@@ -1916,34 +1924,26 @@ func (a *ActivityLog) loadConfigOrDefault(ctx context.Context, isReportingEnable
if err := configRaw.DecodeJSON(&config); err != nil {
return config, err
}
-
- // check if the retention time is lesser than the default when reporting is enabled
- if (config.RetentionMonths < ActivityLogMinimumRetentionMonths) && isReportingEnabled {
- updatedConfig, err := a.setDefaultRetentionMonthsInConfig(ctx, config)
- if err != nil {
- return config, err
- }
- return updatedConfig, nil
- }
return config, nil
}
// setDefaultRetentionMonthsInConfig sets the retention months in activity config with default value.
// This supports upgrades from versions prior to set the new default ActivityLogMinimumRetentionMonths.
func (a *ActivityLog) setDefaultRetentionMonthsInConfig(ctx context.Context, inputConfig activityConfig) (activityConfig, error) {
+ if a.core.perfStandby {
+ return inputConfig, nil
+ }
+
inputConfig.RetentionMonths = ActivityLogMinimumRetentionMonths
// Store the config
- entry, err := logical.StorageEntryJSON(path.Join(activitySubPath, activityConfigKey), inputConfig)
+ entry, err := logical.StorageEntryJSON(activityConfigKey, inputConfig)
if err != nil {
return inputConfig, err
}
if err := a.view.Put(ctx, entry); err != nil {
return inputConfig, err
}
-
- // Set the new config on the activity log
- a.SetConfig(ctx, inputConfig)
return inputConfig, nil
}
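
The activity log change above moves the minimum-retention upgrade check out of loadConfigOrDefault and into NewActivityLog, and setDefaultRetentionMonthsInConfig now returns early on performance standbys, which cannot write to storage. A condensed sketch of the resulting decision flow, using simplified stand-in types rather than the real Vault ones:

```go
package main

import "fmt"

const minimumRetentionMonths = 48 // stand-in for ActivityLogMinimumRetentionMonths

type activityConfig struct{ RetentionMonths int }

// clampRetention condenses the flow above: the clamp applies only when manual
// license reporting is enabled, and performance standbys skip the clamp (and
// the storage write behind it), leaving the upgrade to the active node.
func clampRetention(c activityConfig, reportingEnabled, perfStandby bool) activityConfig {
	if !reportingEnabled || c.RetentionMonths >= minimumRetentionMonths {
		return c
	}
	if perfStandby {
		return c
	}
	c.RetentionMonths = minimumRetentionMonths
	return c
}

func main() {
	fmt.Println(clampRetention(activityConfig{RetentionMonths: 24}, true, false)) // {48}
	fmt.Println(clampRetention(activityConfig{RetentionMonths: 24}, true, true))  // {24}
}
```
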
diff --git a/vault/activity_log_util.go b/vault/activity_log_util.go
index a3a9d2b9c1b3..890af5533fad 100644
--- a/vault/activity_log_util.go
+++ b/vault/activity_log_util.go
@@ -7,13 +7,9 @@ package vault
import (
"context"
- "time"
)
// sendCurrentFragment is a no-op on OSS
func (a *ActivityLog) sendCurrentFragment(ctx context.Context) error {
return nil
}
-
-// CensusReport is a no-op on OSS
-func (a *ActivityLog) CensusReport(context.Context, CensusReporter, time.Time) {}
diff --git a/vault/hcp_link/proto/go.mod b/vault/hcp_link/proto/go.mod
index bad600fdf57e..6a98c40a9892 100644
--- a/vault/hcp_link/proto/go.mod
+++ b/vault/hcp_link/proto/go.mod
@@ -3,14 +3,13 @@ module github.com/hashicorp/vault/vault/hcp_link/proto
go 1.19
require (
- google.golang.org/grpc v1.56.3
- google.golang.org/protobuf v1.30.0
+ google.golang.org/grpc v1.63.2
+ google.golang.org/protobuf v1.34.1
)
require (
- github.com/golang/protobuf v1.5.3 // indirect
- golang.org/x/net v0.17.0 // indirect
- golang.org/x/sys v0.13.0 // indirect
- golang.org/x/text v0.13.0 // indirect
- google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
+ golang.org/x/net v0.25.0 // indirect
+ golang.org/x/sys v0.20.0 // indirect
+ golang.org/x/text v0.15.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
)
diff --git a/vault/hcp_link/proto/go.sum b/vault/hcp_link/proto/go.sum
index ffa46a35054e..2af26c42317f 100644
--- a/vault/hcp_link/proto/go.sum
+++ b/vault/hcp_link/proto/go.sum
@@ -1,20 +1,13 @@
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
-golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
-golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
-google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
-google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
+google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
+google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
+google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
diff --git a/vault/identity_store.go b/vault/identity_store.go
index c10edf7ad368..8d53f4c35682 100644
--- a/vault/identity_store.go
+++ b/vault/identity_store.go
@@ -6,6 +6,7 @@ package vault
import (
"context"
"fmt"
+ "reflect"
"strings"
"time"
@@ -24,6 +25,7 @@ import (
"github.com/hashicorp/vault/sdk/helper/locksutil"
"github.com/hashicorp/vault/sdk/logical"
"github.com/patrickmn/go-cache"
+ "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/timestamppb"
)
@@ -621,316 +623,453 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) {
defer i.lock.Unlock()
switch {
- // Check if the key is a storage entry key for an entity bucket
case strings.HasPrefix(key, storagepacker.StoragePackerBucketsPrefix):
- // Create a MemDB transaction
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- // Each entity object in MemDB holds the MD5 hash of the storage
- // entry key of the entity bucket. Fetch all the entities that
- // belong to this bucket using the hash value. Remove these entities
- // from MemDB along with all the aliases of each entity.
- entitiesFetched, err := i.MemDBEntitiesByBucketKeyInTxn(txn, key)
- if err != nil {
- i.logger.Error("failed to fetch entities using the bucket key", "key", key)
- return
- }
+		// key is for an entity bucket in storage.
+ i.invalidateEntityBucket(ctx, key)
+ case strings.HasPrefix(key, groupBucketsPrefix):
+ // key is for a group bucket in storage.
+ i.invalidateGroupBucket(ctx, key)
+ case strings.HasPrefix(key, oidcTokensPrefix):
+ // key is for oidc tokens in storage.
+ i.invalidateOIDCToken(ctx)
+ case strings.HasPrefix(key, clientPath):
+ // key is for a client in storage.
+ i.invalidateClientPath(ctx, key)
+ case strings.HasPrefix(key, localAliasesBucketsPrefix):
+ // key is for a local alias bucket in storage.
+ i.invalidateLocalAliasesBucket(ctx, key)
+ }
+}
+
+func (i *IdentityStore) invalidateEntityBucket(ctx context.Context, key string) {
+ txn := i.db.Txn(true)
+ defer txn.Abort()
- for _, entity := range entitiesFetched {
- // Delete all the aliases in the entity. This function will also remove
- // the corresponding alias indexes too.
- err = i.deleteAliasesInEntityInTxn(txn, entity, entity.Aliases)
+ // The handling of entities has the added quirk of dealing with a temporary
+ // copy of the entity written in storage on the active node of performance
+ // secondary clusters. These temporary entity entries in storage must be
+ // removed once the actual entity appears in the storage bucket (as
+ // replicated from the primary cluster).
+ //
+ // This function retrieves all entities from MemDB that have a corresponding
+ // storage key that matches the provided key to invalidate. This is the set
+ // of entities that need to be updated, removed, or left alone in MemDB.
+ //
+ // The logic iterates over every entity stored in the invalidated storage
+ // bucket. For each entity read from the storage bucket, the set of entities
+ // read from MemDB is searched for the same entity. If it can't be found,
+ // it means that it needs to be inserted into MemDB. On the other hand, if
+	// the entity is found, the storage bucket entity is compared to the
+ // MemDB entity. If they do not match, then the storage entity state needs
+ // to be used to update the MemDB entity; if they did match, then it means
+ // that the MemDB entity can be left alone. As each MemDB entity is
+ // processed in the loop, it is removed from the set of MemDB entities.
+ //
+ // Once all entities from the storage bucket have been compared to those
+ // retrieved from MemDB, the remaining entities from the set retrieved from
+ // MemDB are those that have been deleted from storage and must be removed
+ // from MemDB (because as MemDB entities that matches a storage bucket
+ // entity were processed, they were removed from the set).
+ memDBEntities, err := i.MemDBEntitiesByBucketKeyInTxn(txn, key)
+ if err != nil {
+ i.logger.Error("failed to fetch entities using the bucket key", "key", key)
+ return
+ }
+
+ bucket, err := i.entityPacker.GetBucket(ctx, key)
+ if err != nil {
+ i.logger.Error("failed to refresh entities", "key", key, "error", err)
+ return
+ }
+
+ if bucket != nil {
+ // The storage entry for the entity bucket exists, so we need to compare
+ // the entities in that bucket with those in MemDB and only update those
+ // that are different. The entities in the bucket storage entry are the
+ // source of truth.
+
+ // Iterate over each entity item from the bucket
+ for _, item := range bucket.Items {
+ bucketEntity, err := i.parseEntityFromBucketItem(ctx, item)
if err != nil {
- i.logger.Error("failed to delete aliases in entity", "entity_id", entity.ID, "error", err)
+ i.logger.Error("failed to parse entity from bucket entry item", "error", err)
return
}
- // Delete the entity using the same transaction
- err = i.MemDBDeleteEntityByIDInTxn(txn, entity.ID)
+ localAliases, err := i.parseLocalAliases(bucketEntity.ID)
if err != nil {
- i.logger.Error("failed to delete entity from MemDB", "entity_id", entity.ID, "error", err)
+ i.logger.Error("failed to load local aliases from storage", "error", err)
return
}
- }
- // Get the storage bucket entry
- bucket, err := i.entityPacker.GetBucket(ctx, key)
- if err != nil {
- i.logger.Error("failed to refresh entities", "key", key, "error", err)
- return
- }
-
- // If the underlying entry is nil, it means that this invalidation
- // notification is for the deletion of the underlying storage entry. At
- // this point, since all the entities belonging to this bucket are
- // already removed, there is nothing else to be done. But, if the
- // storage entry is non-nil, its an indication of an update. In this
- // case, entities in the updated bucket needs to be reinserted into
- // MemDB.
- var entityIDs []string
- if bucket != nil {
- entityIDs = make([]string, 0, len(bucket.Items))
- for _, item := range bucket.Items {
- entity, err := i.parseEntityFromBucketItem(ctx, item)
- if err != nil {
- i.logger.Error("failed to parse entity from bucket entry item", "error", err)
- return
+ if localAliases != nil {
+ for _, alias := range localAliases.Aliases {
+ bucketEntity.UpsertAlias(alias)
}
+ }
- localAliases, err := i.parseLocalAliases(entity.ID)
- if err != nil {
- i.logger.Error("failed to load local aliases from storage", "error", err)
- return
- }
- if localAliases != nil {
- for _, alias := range localAliases.Aliases {
- entity.UpsertAlias(alias)
- }
+ var memDBEntity *identity.Entity
+ for i, entity := range memDBEntities {
+ if entity.ID == bucketEntity.ID {
+ memDBEntity = entity
+
+ // Remove this processed entity from the slice, so that
+			// all that will be left are unprocessed entities.
+ copy(memDBEntities[i:], memDBEntities[i+1:])
+ memDBEntities = memDBEntities[:len(memDBEntities)-1]
+ break
}
+ }
+
+ // If the entity is not in MemDB or if it is but differs from the
+ // state that's in the bucket storage entry, upsert it into MemDB.
- // Only update MemDB and don't touch the storage
- err = i.upsertEntityInTxn(ctx, txn, entity, nil, false)
+ // We've considered the use of github.com/google/go-cmp here,
+ // but opted for sticking with reflect.DeepEqual because go-cmp
+ // is intended for testing and is able to panic in some
+ // situations.
+ if memDBEntity == nil || !reflect.DeepEqual(memDBEntity, bucketEntity) {
+			// The entity is new or differs from the stored state; upsert it into MemDB.
+ err = i.upsertEntityInTxn(ctx, txn, bucketEntity, nil, false)
if err != nil {
- i.logger.Error("failed to update entity in MemDB", "error", err)
+ i.logger.Error("failed to update entity in MemDB", "entity_id", bucketEntity.ID, "error", err)
return
}
- // If we are a secondary, the entity created by the secondary
- // via the CreateEntity RPC would have been cached. Now that the
- // invalidation of the same has hit, there is no need of the
- // cache. Clearing the cache. Writing to storage can't be
- // performed by perf standbys. So only doing this in the active
- // node of the secondary.
+ // If this is a performance secondary, the entity created on
+ // this node would have been cached in a local cache based on
+ // the result of the CreateEntity RPC call to the primary
+ // cluster. Since this invalidation is signaling that the
+ // entity is now in the primary cluster's storage, the locally
+ // cached entry can be removed.
if i.localNode.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) && i.localNode.HAState() == consts.Active {
- if err := i.localAliasPacker.DeleteItem(ctx, entity.ID+tmpSuffix); err != nil {
- i.logger.Error("failed to clear local alias entity cache", "error", err, "entity_id", entity.ID)
+ if err := i.localAliasPacker.DeleteItem(ctx, bucketEntity.ID+tmpSuffix); err != nil {
+ i.logger.Error("failed to clear local alias entity cache", "error", err, "entity_id", bucketEntity.ID)
return
}
}
-
- entityIDs = append(entityIDs, entity.ID)
}
}
+ }
+
+ // Any entities that are still in the memDBEntities slice are ones that do
+ // not exist in the bucket storage entry. These entities have to be removed
+ // from MemDB.
+ for _, memDBEntity := range memDBEntities {
+ err = i.deleteAliasesInEntityInTxn(txn, memDBEntity, memDBEntity.Aliases)
+ if err != nil {
+ i.logger.Error("failed to delete aliases in entity", "entity_id", memDBEntity.ID, "error", err)
+ return
+ }
+
+ err = i.MemDBDeleteEntityByIDInTxn(txn, memDBEntity.ID)
+ if err != nil {
+ i.logger.Error("failed to delete entity from MemDB", "entity_id", memDBEntity.ID, "error", err)
+ return
+ }
- // entitiesFetched are the entities before invalidation. entityIDs
- // represent entities that are valid after invalidation. Clear the
- // storage entries of local aliases for those entities that are
- // indicated deleted by this invalidation.
+ // In addition, if this is an active node of a performance secondary
+ // cluster, remove the local alias storage entry for this deleted entity.
if i.localNode.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) && i.localNode.HAState() == consts.Active {
- for _, entity := range entitiesFetched {
- if !strutil.StrListContains(entityIDs, entity.ID) {
- if err := i.localAliasPacker.DeleteItem(ctx, entity.ID); err != nil {
- i.logger.Error("failed to clear local alias for entity", "error", err, "entity_id", entity.ID)
- return
- }
- }
+ if err := i.localAliasPacker.DeleteItem(ctx, memDBEntity.ID); err != nil {
+ i.logger.Error("failed to clear local alias for entity", "error", err, "entity_id", memDBEntity.ID)
+ return
}
}
+ }
- txn.Commit()
- return
+ txn.Commit()
+}
- // Check if the key is a storage entry key for an group bucket
- // For those entities that are deleted, clear up the local alias entries
- case strings.HasPrefix(key, groupBucketsPrefix):
- // Create a MemDB transaction
- txn := i.db.Txn(true)
- defer txn.Abort()
+func (i *IdentityStore) invalidateGroupBucket(ctx context.Context, key string) {
+ // Create a MemDB transaction
+ txn := i.db.Txn(true)
+ defer txn.Abort()
+
+ groupsFetched, err := i.MemDBGroupsByBucketKeyInTxn(txn, key)
+ if err != nil {
+ i.logger.Error("failed to fetch groups using the bucket key", "key", key)
+ return
+ }
- groupsFetched, err := i.MemDBGroupsByBucketKeyInTxn(txn, key)
+ for _, group := range groupsFetched {
+ // Delete the group using the same transaction
+ err = i.MemDBDeleteGroupByIDInTxn(txn, group.ID)
if err != nil {
- i.logger.Error("failed to fetch groups using the bucket key", "key", key)
+ i.logger.Error("failed to delete group from MemDB", "group_id", group.ID, "error", err)
return
}
- for _, group := range groupsFetched {
- // Delete the group using the same transaction
- err = i.MemDBDeleteGroupByIDInTxn(txn, group.ID)
+ if group.Alias != nil {
+ err := i.MemDBDeleteAliasByIDInTxn(txn, group.Alias.ID, true)
if err != nil {
- i.logger.Error("failed to delete group from MemDB", "group_id", group.ID, "error", err)
+ i.logger.Error("failed to delete group alias from MemDB", "error", err)
return
}
+ }
+ }
+
+ // Get the storage bucket entry
+ bucket, err := i.groupPacker.GetBucket(ctx, key)
+ if err != nil {
+ i.logger.Error("failed to refresh group", "key", key, "error", err)
+ return
+ }
- if group.Alias != nil {
- err := i.MemDBDeleteAliasByIDInTxn(txn, group.Alias.ID, true)
+ if bucket != nil {
+ for _, item := range bucket.Items {
+ group, err := i.parseGroupFromBucketItem(item)
+ if err != nil {
+ i.logger.Error("failed to parse group from bucket entry item", "error", err)
+ return
+ }
+
+			// Before updating the group, check whether the group already
+			// exists in MemDB. If it does, delete its alias from MemDB first,
+			// since this invalidation may carry an updated alias.
+ groupFetched, err := i.MemDBGroupByIDInTxn(txn, group.ID, true)
+ if err != nil {
+ i.logger.Error("failed to fetch group from MemDB", "error", err)
+ return
+ }
+
+ // If the group has an alias remove it from memdb
+ if groupFetched != nil && groupFetched.Alias != nil {
+ err := i.MemDBDeleteAliasByIDInTxn(txn, groupFetched.Alias.ID, true)
if err != nil {
- i.logger.Error("failed to delete group alias from MemDB", "error", err)
+ i.logger.Error("failed to delete old group alias from MemDB", "error", err)
return
}
}
- }
- // Get the storage bucket entry
- bucket, err := i.groupPacker.GetBucket(ctx, key)
- if err != nil {
- i.logger.Error("failed to refresh group", "key", key, "error", err)
- return
+ // Only update MemDB and don't touch the storage
+ err = i.UpsertGroupInTxn(ctx, txn, group, false)
+ if err != nil {
+ i.logger.Error("failed to update group in MemDB", "error", err)
+ return
+ }
}
+ }
- if bucket != nil {
- for _, item := range bucket.Items {
- group, err := i.parseGroupFromBucketItem(item)
- if err != nil {
- i.logger.Error("failed to parse group from bucket entry item", "error", err)
- return
- }
+ txn.Commit()
+}
- // Before updating the group, check if the group exists. If it
- // does, then delete the group alias from memdb, for the
- // invalidation would have sent an update.
- groupFetched, err := i.MemDBGroupByIDInTxn(txn, group.ID, true)
- if err != nil {
- i.logger.Error("failed to fetch group from MemDB", "error", err)
- return
- }
+// invalidateOIDCToken is called by the Invalidate function to handle the
+// invalidation of an OIDC token storage entry.
+func (i *IdentityStore) invalidateOIDCToken(ctx context.Context) {
+ ns, err := namespace.FromContext(ctx)
+ if err != nil {
+ i.logger.Error("error retrieving namespace", "error", err)
+ return
+ }
- // If the group has an alias remove it from memdb
- if groupFetched != nil && groupFetched.Alias != nil {
- err := i.MemDBDeleteAliasByIDInTxn(txn, groupFetched.Alias.ID, true)
- if err != nil {
- i.logger.Error("failed to delete old group alias from MemDB", "error", err)
- return
- }
- }
+ // Wipe the cache for the requested namespace. This will also clear
+ // the shared namespace as well.
+ if err := i.oidcCache.Flush(ns); err != nil {
+ i.logger.Error("error flushing oidc cache", "error", err)
+ return
+ }
+}
- // Only update MemDB and don't touch the storage
- err = i.UpsertGroupInTxn(ctx, txn, group, false)
- if err != nil {
- i.logger.Error("failed to update group in MemDB", "error", err)
- return
- }
- }
- }
+// invalidateClientPath is called by the Invalidate function to handle the
+// invalidation of a client path storage entry.
+func (i *IdentityStore) invalidateClientPath(ctx context.Context, key string) {
+ name := strings.TrimPrefix(key, clientPath)
- txn.Commit()
+ // Invalidate the cached client in memdb
+ if err := i.memDBDeleteClientByName(ctx, name); err != nil {
+ i.logger.Error("error invalidating client", "error", err, "key", key)
return
+ }
+}
- case strings.HasPrefix(key, oidcTokensPrefix):
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- i.logger.Error("error retrieving namespace", "error", err)
- return
- }
+// invalidateLocalAliasesBucket is called by the Invalidate function to handle the
+// invalidation of a local alias bucket storage entry.
+func (i *IdentityStore) invalidateLocalAliasesBucket(ctx context.Context, key string) {
+ // This invalidation only happens on performance standby servers
- // Wipe the cache for the requested namespace. This will also clear
- // the shared namespace as well.
- if err := i.oidcCache.Flush(ns); err != nil {
- i.logger.Error("error flushing oidc cache", "error", err)
- }
- case strings.HasPrefix(key, clientPath):
- name := strings.TrimPrefix(key, clientPath)
+ // Create a MemDB transaction and abort it once this function returns
+ txn := i.db.Txn(true)
+ defer txn.Abort()
- // Invalidate the cached client in memdb
- if err := i.memDBDeleteClientByName(ctx, name); err != nil {
- i.logger.Error("error invalidating client", "error", err, "key", key)
- return
- }
- case strings.HasPrefix(key, localAliasesBucketsPrefix):
- //
- // This invalidation only happens on perf standbys
- //
-
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- // Find all the local aliases belonging to this bucket and remove it
- // both from aliases table and entities table. We will add the local
- // aliases back by parsing the storage key. This way the deletion
- // invalidation gets handled.
- aliases, err := i.MemDBLocalAliasesByBucketKeyInTxn(txn, key)
- if err != nil {
- i.logger.Error("failed to fetch entities using the bucket key", "key", key)
- return
- }
+ // Local aliases have the added complexity of being associated with
+ // entities. Whenever a local alias is updated or inserted into MemDB, its
+ // associated MemDB-stored entity must also be updated.
+ //
+ // This function retrieves all local aliases that have a corresponding
+ // storage key that matches the provided key to invalidate. This is the
+ // set of local aliases that need to be updated, removed, or left
+ // alone in MemDB. Each of these operations is done as its own MemDB
+ // operation, but the corresponding changes that need to be made to the
+ // associated entities can be batched together to cut down on the number of
+ // MemDB operations.
+ //
+ // The logic iterates over every local alias stored at the invalidated key.
+ // For each local alias read from the storage entry, the set of local
+ // aliases read from MemDB is searched for the same local alias. If it can't
+ // be found, it means that it needs to be inserted into MemDB. However, if
+ // it's found, it must be compared with the local alias from the storage. If
+	// it's found, it must be compared with the local alias from storage. If
+ // updated. If they did match, it means that this particular local alias did
+ // not change in storage, so nothing further needs to be done. Each local
+ // alias processed in this loop is removed from the set of retrieved local
+ // aliases. The local alias is also added to the map tracking local aliases
+ // that need to be upserted in their associated entities in MemDB.
+ //
+ // Once the code is done iterating over all of the local aliases from
+	// storage, any local alias still in the set retrieved from MemDB
+ // corresponds to a local alias that is no longer in storage and must be
+ // removed from MemDB. These local aliases are added to the map tracking
+ // local aliases to remove from their entities in MemDB. The actual removal
+ // of the local aliases themselves is done as part of the tidying up of the
+ // associated entities, described below.
+ //
+ // In order to batch the changes to the associated entities, a map of entity
+ // to local aliases (slice of local alias) is built up in the loop that
+ // iterates over the local aliases from storage. Similarly, the code that
+ // detects which local aliases to remove from MemDB also builds a separate
+ // map of entity to local aliases (slice of local alias). Each element in
+ // the map of local aliases to update in their entity is processed as
+ // follows: the mapped slice of local aliases is iterated over and each
+ // local alias is upserted into the entity and then the entity itself is
+ // upserted. Then, each element in the map of local aliases to remove from
+	// their entity is processed as follows: deleteAliasesInEntityInTxn is
+	// called to remove the local aliases from both the entity and MemDB, and
+	// then the updated entity itself is upserted into MemDB.
+
+ // Get all cached local aliases to compare with invalidated bucket
+ memDBLocalAliases, err := i.MemDBLocalAliasesByBucketKeyInTxn(txn, key)
+ if err != nil {
+ i.logger.Error("failed to fetch local aliases using the bucket key", "key", key, "error", err)
+ return
+ }
- for _, alias := range aliases {
- entity, err := i.MemDBEntityByIDInTxn(txn, alias.CanonicalID, true)
- if err != nil {
- i.logger.Error("failed to fetch entity during local alias invalidation", "entity_id", alias.CanonicalID, "error", err)
- return
- }
- if entity == nil {
- i.logger.Error("failed to fetch entity during local alias invalidation, missing entity", "entity_id", alias.CanonicalID, "error", err)
+ // Get local aliases from the invalidated bucket
+ bucket, err := i.localAliasPacker.GetBucket(ctx, key)
+ if err != nil {
+ i.logger.Error("failed to refresh local aliases", "key", key, "error", err)
+ return
+ }
+
+ // This map tracks the set of local aliases that need to be updated in each
+ // affected entity in MemDB.
+ entityLocalAliasesToUpsert := map[*identity.Entity][]*identity.Alias{}
+
+ // This map tracks the set of local aliases that need to be removed from
+	// their affected entity in MemDB, as well as removing the local aliases
+ // themselves.
+ entityLocalAliasesToRemove := map[*identity.Entity][]*identity.Alias{}
+
+ if bucket != nil {
+ // The storage entry for the local alias bucket exists, so we need to
+ // compare the local aliases in that bucket with those in MemDB and only
+ // update those that are different. The local aliases in the bucket are
+ // the source of truth.
+
+ // Iterate over each local alias item from the bucket
+ for _, item := range bucket.Items {
+ if strings.HasSuffix(item.ID, tmpSuffix) {
continue
}
- // Delete local aliases from the entity.
- err = i.deleteAliasesInEntityInTxn(txn, entity, []*identity.Alias{alias})
- if err != nil {
- i.logger.Error("failed to delete aliases in entity", "entity_id", entity.ID, "error", err)
- return
- }
+ var bucketLocalAliases identity.LocalAliases
- // Update the entity with removed alias.
- if err := i.MemDBUpsertEntityInTxn(txn, entity); err != nil {
- i.logger.Error("failed to delete entity from MemDB", "entity_id", entity.ID, "error", err)
+ err = anypb.UnmarshalTo(item.Message, &bucketLocalAliases, proto.UnmarshalOptions{})
+ if err != nil {
+ i.logger.Error("failed to parse local aliases during invalidation", "item_id", item.ID, "error", err)
return
}
- }
- // Now read the invalidated storage key
- bucket, err := i.localAliasPacker.GetBucket(ctx, key)
- if err != nil {
- i.logger.Error("failed to refresh local aliases", "key", key, "error", err)
- return
- }
- if bucket != nil {
- for _, item := range bucket.Items {
- if strings.HasSuffix(item.ID, tmpSuffix) {
- continue
- }
-
- var localAliases identity.LocalAliases
- err = ptypes.UnmarshalAny(item.Message, &localAliases)
- if err != nil {
- i.logger.Error("failed to parse local aliases during invalidation", "error", err)
+ for _, bucketLocalAlias := range bucketLocalAliases.Aliases {
+ // Find the entity related to bucketLocalAlias in MemDB in order
+				// to track any local alias modifications that must be made in
+ // this entity.
+ memDBEntity := i.FetchEntityForLocalAliasInTxn(txn, bucketLocalAlias)
+ if memDBEntity == nil {
+ // FetchEntityForLocalAliasInTxn already logs any error
return
}
- for _, alias := range localAliases.Aliases {
- // Add to the aliases table
- if err := i.MemDBUpsertAliasInTxn(txn, alias, false); err != nil {
- i.logger.Error("failed to insert local alias to memdb during invalidation", "error", err)
- return
+
+ // memDBLocalAlias starts off nil but gets set to the local
+ // alias from memDBLocalAliases whose ID matches the ID of
+ // bucketLocalAlias.
+ var memDBLocalAlias *identity.Alias
+ for i, localAlias := range memDBLocalAliases {
+ if localAlias.ID == bucketLocalAlias.ID {
+ memDBLocalAlias = localAlias
+
+ // Remove this processed local alias from the
+ // memDBLocalAliases slice, so that all that
+ // will be left are unprocessed local aliases.
+ copy(memDBLocalAliases[i:], memDBLocalAliases[i+1:])
+ memDBLocalAliases = memDBLocalAliases[:len(memDBLocalAliases)-1]
+
+ break
}
+ }
- // Fetch the associated entity and add the alias to that too.
- entity, err := i.MemDBEntityByIDInTxn(txn, alias.CanonicalID, false)
+ // We've considered the use of github.com/google/go-cmp here,
+ // but opted for sticking with reflect.DeepEqual because go-cmp
+ // is intended for testing and is able to panic in some
+ // situations.
+ if memDBLocalAlias == nil || !reflect.DeepEqual(memDBLocalAlias, bucketLocalAlias) {
+ // The bucketLocalAlias is not in MemDB or it has changed in
+ // storage.
+ err = i.MemDBUpsertAliasInTxn(txn, bucketLocalAlias, false)
if err != nil {
- i.logger.Error("failed to fetch entity during local alias invalidation", "error", err)
+ i.logger.Error("failed to update local alias in MemDB", "alias_id", bucketLocalAlias.ID, "error", err)
return
}
- if entity == nil {
- cachedEntityItem, err := i.localAliasPacker.GetItem(alias.CanonicalID + tmpSuffix)
- if err != nil {
- i.logger.Error("failed to fetch cached entity", "key", key, "error", err)
- return
- }
- if cachedEntityItem != nil {
- entity, err = i.parseCachedEntity(cachedEntityItem)
- if err != nil {
- i.logger.Error("failed to parse cached entity", "key", key, "error", err)
- return
- }
- }
- }
- if entity == nil {
- i.logger.Error("received local alias invalidation for an invalid entity", "item.ID", item.ID)
- return
- }
- entity.UpsertAlias(alias)
- // Update the entities table
- if err := i.MemDBUpsertEntityInTxn(txn, entity); err != nil {
- i.logger.Error("failed to upsert entity during local alias invalidation", "error", err)
- return
- }
+ // Add this local alias to the set of local aliases that
+ // need to be updated for memDBEntity.
+ entityLocalAliasesToUpsert[memDBEntity] = append(entityLocalAliasesToUpsert[memDBEntity], bucketLocalAlias)
}
}
}
- txn.Commit()
- return
}
+
+ // Any local aliases still remaining in memDBLocalAliases do not exist in
+ // storage and should be removed from MemDB.
+ for _, memDBLocalAlias := range memDBLocalAliases {
+ memDBEntity := i.FetchEntityForLocalAliasInTxn(txn, memDBLocalAlias)
+ if memDBEntity == nil {
+ // FetchEntityForLocalAliasInTxn already logs any error
+ return
+ }
+
+ entityLocalAliasesToRemove[memDBEntity] = append(entityLocalAliasesToRemove[memDBEntity], memDBLocalAlias)
+ }
+
+ // Now process the entityLocalAliasesToUpsert map.
+ for entity, localAliases := range entityLocalAliasesToUpsert {
+ for _, localAlias := range localAliases {
+ entity.UpsertAlias(localAlias)
+ }
+
+ err = i.MemDBUpsertEntityInTxn(txn, entity)
+ if err != nil {
+ i.logger.Error("failed to update entity in MemDB", "entity_id", entity.ID, "error", err)
+ return
+ }
+ }
+
+ // Finally process the entityLocalAliasesToRemove map.
+ for entity, localAliases := range entityLocalAliasesToRemove {
+	// The deleteAliasesInEntityInTxn function removes the provided aliases from
+ // the entity, but it also removes the aliases themselves from MemDB.
+ err := i.deleteAliasesInEntityInTxn(txn, entity, localAliases)
+ if err != nil {
+ i.logger.Error("failed to delete aliases in entity", "entity_id", entity.ID, "error", err)
+ return
+ }
+
+ err = i.MemDBUpsertEntityInTxn(txn, entity)
+ if err != nil {
+ i.logger.Error("failed to update entity in MemDB", "entity_id", entity.ID, "error", err)
+ return
+ }
+ }
+
+ txn.Commit()
}
func (i *IdentityStore) parseLocalAliases(entityID string) (*identity.LocalAliases, error) {
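
The invalidation rework above applies one reconciliation pattern to both entities and local aliases: the storage bucket is the source of truth, MemDB records that match a bucket item are pruned from the working set as they are processed, and whatever remains in the set afterwards must be deleted. A self-contained sketch of that pattern, with a hypothetical entity type standing in for Vault's:

```go
package main

import (
	"fmt"
	"reflect"
)

type entity struct {
	ID   string
	Meta map[string]string
}

// reconcile follows the invalidation strategy described above: walk the
// storage bucket, prune each matching MemDB record from the working set,
// upsert anything new or changed, and delete whatever is left over.
func reconcile(storage, memdb []*entity) (upserts, deletes []*entity) {
	for _, s := range storage {
		var match *entity
		for i, m := range memdb {
			if m.ID == s.ID {
				match = m
				// Remove the processed record so that all that will be
				// left are unprocessed records.
				copy(memdb[i:], memdb[i+1:])
				memdb = memdb[:len(memdb)-1]
				break
			}
		}
		// reflect.DeepEqual is used here, as in the patch, rather than
		// go-cmp, which is aimed at tests and can panic.
		if match == nil || !reflect.DeepEqual(match, s) {
			upserts = append(upserts, s)
		}
	}
	// Records remaining in the MemDB set no longer exist in storage.
	deletes = memdb
	return upserts, deletes
}

func main() {
	storage := []*entity{{ID: "a"}, {ID: "b", Meta: map[string]string{"k": "v"}}}
	memdb := []*entity{{ID: "b"}, {ID: "c"}}
	up, del := reconcile(storage, memdb)
	fmt.Println(len(up), len(del)) // 2 1: "a" is new, "b" changed, "c" was deleted
}
```

The `copy` plus reslice idiom mirrors the in-place removal used in the patch, and reflect.DeepEqual is kept over go-cmp for the reason given in the comments above.
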
diff --git a/vault/identity_store_test.go b/vault/identity_store_test.go
index 9ed4659b8d27..7c826dfa0c33 100644
--- a/vault/identity_store_test.go
+++ b/vault/identity_store_test.go
@@ -18,6 +18,7 @@ import (
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/storagepacker"
"github.com/hashicorp/vault/sdk/logical"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/types/known/anypb"
)
@@ -912,3 +913,217 @@ func TestIdentityStore_DeleteCaseSensitivityKey(t *testing.T) {
t.Fatalf("bad: expected no entry for casesensitivity key")
}
}
+
+// TestIdentityStoreInvalidate_Entities verifies the proper handling of
+// entities in the Invalidate method.
+func TestIdentityStoreInvalidate_Entities(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+
+ // Create an entity in storage then call the Invalidate function
+ //
+ id, err := uuid.GenerateUUID()
+ require.NoError(t, err)
+
+ entity := &identity.Entity{
+ Name: "test",
+ NamespaceID: namespace.RootNamespaceID,
+ ID: id,
+ Aliases: []*identity.Alias{},
+ BucketKey: c.identityStore.entityPacker.BucketKey(id),
+ }
+
+ p := c.identityStore.entityPacker
+
+ // Persist the entity which we are merging to
+ entityAsAny, err := anypb.New(entity)
+ require.NoError(t, err)
+
+ item := &storagepacker.Item{
+ ID: id,
+ Message: entityAsAny,
+ }
+
+ err = p.PutItem(context.Background(), item)
+ require.NoError(t, err)
+
+ c.identityStore.Invalidate(context.Background(), p.BucketKey(id))
+
+ txn := c.identityStore.db.Txn(true)
+
+ memEntity, err := c.identityStore.MemDBEntityByIDInTxn(txn, id, true)
+ assert.NoError(t, err)
+ assert.NotNil(t, memEntity)
+
+ txn.Commit()
+
+ // Modify the entity in storage then call the Invalidate function
+ entity.Metadata = make(map[string]string)
+ entity.Metadata["foo"] = "bar"
+
+ entityAsAny, err = anypb.New(entity)
+ require.NoError(t, err)
+
+ item.Message = entityAsAny
+
+ p.PutItem(context.Background(), item)
+
+ c.identityStore.Invalidate(context.Background(), p.BucketKey(id))
+
+ txn = c.identityStore.db.Txn(true)
+
+ memEntity, err = c.identityStore.MemDBEntityByIDInTxn(txn, id, true)
+ assert.NoError(t, err)
+ assert.Contains(t, memEntity.Metadata, "foo")
+
+ txn.Commit()
+
+ // Delete the entity in storage then call the Invalidate function
+ err = p.DeleteItem(context.Background(), id)
+ require.NoError(t, err)
+
+ c.identityStore.Invalidate(context.Background(), p.BucketKey(id))
+
+ txn = c.identityStore.db.Txn(true)
+
+ memEntity, err = c.identityStore.MemDBEntityByIDInTxn(txn, id, true)
+ assert.NoError(t, err)
+ assert.Nil(t, memEntity)
+
+ txn.Commit()
+}
+
+// TestIdentityStoreInvalidate_LocalAliasesWithEntity verifies the correct
+// handling of local aliases in the Invalidate method.
+func TestIdentityStoreInvalidate_LocalAliasesWithEntity(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+
+ // Create an entity in storage then call the Invalidate function
+ //
+ entityID, err := uuid.GenerateUUID()
+ require.NoError(t, err)
+
+ entity := &identity.Entity{
+ Name: "test",
+ NamespaceID: namespace.RootNamespaceID,
+ ID: entityID,
+ Aliases: []*identity.Alias{},
+ BucketKey: c.identityStore.entityPacker.BucketKey(entityID),
+ }
+
+ aliasID, err := uuid.GenerateUUID()
+ require.NoError(t, err)
+
+ localAliases := &identity.LocalAliases{
+ Aliases: []*identity.Alias{
+ {
+ ID: aliasID,
+ Name: "test",
+ NamespaceID: namespace.RootNamespaceID,
+ CanonicalID: entityID,
+ MountAccessor: "userpass-000000",
+ },
+ },
+ }
+
+ ep := c.identityStore.entityPacker
+
+ // Persist the entity which we are merging to
+ entityAsAny, err := anypb.New(entity)
+ require.NoError(t, err)
+
+ entityItem := &storagepacker.Item{
+ ID: entityID,
+ Message: entityAsAny,
+ }
+
+ err = ep.PutItem(context.Background(), entityItem)
+ require.NoError(t, err)
+
+ c.identityStore.Invalidate(context.Background(), ep.BucketKey(entityID))
+
+ lap := c.identityStore.localAliasPacker
+
+ localAliasesAsAny, err := anypb.New(localAliases)
+ require.NoError(t, err)
+
+ localAliasesItem := &storagepacker.Item{
+ ID: entityID,
+ Message: localAliasesAsAny,
+ }
+
+ err = lap.PutItem(context.Background(), localAliasesItem)
+ require.NoError(t, err)
+
+ c.identityStore.Invalidate(context.Background(), lap.BucketKey(entityID))
+
+ txn := c.identityStore.db.Txn(true)
+
+ memDBEntity, err := c.identityStore.MemDBEntityByIDInTxn(txn, entityID, true)
+ assert.NoError(t, err)
+ assert.NotNil(t, memDBEntity)
+
+ memDBLocalAlias, err := c.identityStore.MemDBAliasByIDInTxn(txn, aliasID, true, false)
+ assert.NoError(t, err)
+ assert.NotNil(t, memDBLocalAlias)
+ assert.Equal(t, 1, len(memDBEntity.Aliases))
+ assert.NotNil(t, memDBEntity.Aliases[0])
+ assert.Equal(t, memDBEntity.Aliases[0].ID, memDBLocalAlias.ID)
+
+ txn.Commit()
+}
+
+// TestIdentityStoreInvalidate_TemporaryEntity verifies the proper handling of
+// temporary entities in the Invalidate method.
+func TestIdentityStoreInvalidate_TemporaryEntity(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+
+ // Create an entity in storage then call the Invalidate function
+ //
+ entityID, err := uuid.GenerateUUID()
+ require.NoError(t, err)
+
+ tempEntity := &identity.Entity{
+ Name: "test",
+ NamespaceID: namespace.RootNamespaceID,
+ ID: entityID,
+ Aliases: []*identity.Alias{},
+ BucketKey: c.identityStore.entityPacker.BucketKey(entityID),
+ }
+
+ lap := c.identityStore.localAliasPacker
+ ep := c.identityStore.entityPacker
+
+ // Persist the entity which we are merging to
+ tempEntityAsAny, err := anypb.New(tempEntity)
+ require.NoError(t, err)
+
+ tempEntityItem := &storagepacker.Item{
+ ID: entityID + tmpSuffix,
+ Message: tempEntityAsAny,
+ }
+
+ err = lap.PutItem(context.Background(), tempEntityItem)
+ require.NoError(t, err)
+
+ entityAsAny := tempEntityAsAny
+
+ entityItem := &storagepacker.Item{
+ ID: entityID,
+ Message: entityAsAny,
+ }
+
+ err = ep.PutItem(context.Background(), entityItem)
+ require.NoError(t, err)
+
+ c.identityStore.Invalidate(context.Background(), ep.BucketKey(entityID))
+
+ txn := c.identityStore.db.Txn(true)
+
+ memDBEntity, err := c.identityStore.MemDBEntityByIDInTxn(txn, entityID, true)
+ assert.NoError(t, err)
+ assert.NotNil(t, memDBEntity)
+
+ item, err := lap.GetItem(lap.BucketKey(entityID) + tmpSuffix)
+ assert.NoError(t, err)
+ assert.Nil(t, item)
+}
diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go
index 6d9190cbe293..c78db0bc70f7 100644
--- a/vault/identity_store_util.go
+++ b/vault/identity_store_util.go
@@ -1269,6 +1269,36 @@ func (i *IdentityStore) MemDBDeleteEntityByID(entityID string) error {
return nil
}
+// FetchEntityForLocalAliasInTxn fetches the entity associated with the provided
+// local identity.Alias. MemDB will first be searched for the entity. If it is
+// not found there, the localAliasPacker storagepacker.StoragePacker will be
+// used. If an error occurs, an appropriate error message is logged and nil is
+// returned.
+func (i *IdentityStore) FetchEntityForLocalAliasInTxn(txn *memdb.Txn, alias *identity.Alias) *identity.Entity {
+ entity, err := i.MemDBEntityByIDInTxn(txn, alias.CanonicalID, false)
+ if err != nil {
+ i.logger.Error("failed to fetch entity from local alias", "entity_id", alias.CanonicalID, "error", err)
+ return nil
+ }
+
+ if entity == nil {
+ cachedEntityItem, err := i.localAliasPacker.GetItem(alias.CanonicalID + tmpSuffix)
+ if err != nil {
+ i.logger.Error("failed to fetch cached entity from local alias", "key", alias.CanonicalID+tmpSuffix, "error", err)
+ return nil
+ }
+ if cachedEntityItem != nil {
+ entity, err = i.parseCachedEntity(cachedEntityItem)
+ if err != nil {
+ i.logger.Error("failed to parse cached entity", "key", alias.CanonicalID+tmpSuffix, "error", err)
+ return nil
+ }
+ }
+ }
+
+ return entity
+}
+
func (i *IdentityStore) MemDBDeleteEntityByIDInTxn(txn *memdb.Txn, entityID string) error {
if entityID == "" {
return nil
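
FetchEntityForLocalAliasInTxn is a two-tier lookup: MemDB first, then the temporary entity that localAliasPacker caches under the entity ID plus tmpSuffix. A minimal generic sketch of that shape, with maps standing in for MemDB and the packer and a hypothetical suffix value:

```go
package main

import "fmt"

type entity struct{ ID string }

// fetchWithFallback mirrors the lookup order above: consult the in-memory
// store first, then fall back to the cached temporary copy keyed by
// id plus a suffix (as localAliasPacker does with tmpSuffix).
func fetchWithFallback(memdb, cache map[string]*entity, id, suffix string) *entity {
	if e, ok := memdb[id]; ok {
		return e
	}
	return cache[id+suffix] // nil when absent from both
}

func main() {
	cache := map[string]*entity{"e1_tmp": {ID: "e1"}} // "_tmp" is a hypothetical suffix
	fmt.Println(fetchWithFallback(map[string]*entity{}, cache, "e1", "_tmp").ID) // e1
}
```
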
diff --git a/vault/logical_system.go b/vault/logical_system.go
index 14c90422d64e..5627a1736831 100644
--- a/vault/logical_system.go
+++ b/vault/logical_system.go
@@ -1409,6 +1409,9 @@ func (b *SystemBackend) mountInfo(ctx context.Context, entry *MountEntry) map[st
}
entryConfig["user_lockout_config"] = userLockoutConfig
}
+ if rawVal, ok := entry.synthesizedConfigCache.Load("delegated_auth_accessors"); ok {
+ entryConfig["delegated_auth_accessors"] = rawVal.([]string)
+ }
// Add deprecation status only if it exists
builtinType := b.Core.builtinTypeFromMountEntry(ctx, entry)
diff --git a/vault/logical_system_activity.go b/vault/logical_system_activity.go
index 28d2763b5abc..c286b572d07d 100644
--- a/vault/logical_system_activity.go
+++ b/vault/logical_system_activity.go
@@ -311,7 +311,7 @@ func (b *SystemBackend) handleActivityConfigRead(ctx context.Context, req *logic
return logical.ErrorResponse("no activity log present"), nil
}
- config, err := a.loadConfigOrDefault(ctx, b.Core.ManualLicenseReportingEnabled())
+ config, err := a.loadConfigOrDefault(ctx)
if err != nil {
return nil, err
}
@@ -348,7 +348,7 @@ func (b *SystemBackend) handleActivityConfigUpdate(ctx context.Context, req *log
warnings := make([]string, 0)
- config, err := a.loadConfigOrDefault(ctx, b.Core.ManualLicenseReportingEnabled())
+ config, err := a.loadConfigOrDefault(ctx)
if err != nil {
return nil, err
}
diff --git a/vault/seal/seal_wrapper.go b/vault/seal/seal_wrapper.go
index 421024c7e8a1..48a77ff394c2 100644
--- a/vault/seal/seal_wrapper.go
+++ b/vault/seal/seal_wrapper.go
@@ -51,29 +51,32 @@ type SealWrapper struct {
func NewSealWrapper(wrapper wrapping.Wrapper, priority int, name string, sealConfigType string, disabled bool, configured bool) *SealWrapper {
ret := &SealWrapper{
- Wrapper: wrapper,
- Priority: priority,
- Name: name,
- SealConfigType: sealConfigType,
- Disabled: disabled,
- Configured: configured,
+ Wrapper: wrapper,
+ Priority: priority,
+ Name: name,
+ SealConfigType: sealConfigType,
+ Disabled: disabled,
+ Configured: configured,
+ lastSeenHealthy: time.Now(),
+ healthy: false,
}
if configured {
- setHealth(ret, true, time.Now(), ret.lastHealthCheck)
- } else {
- setHealth(ret, false, time.Now(), ret.lastHealthCheck)
+ ret.healthy = true
}
return ret
}
func (sw *SealWrapper) SetHealthy(healthy bool, checkTime time.Time) {
+ sw.hcLock.Lock()
+ defer sw.hcLock.Unlock()
+
+ sw.healthy = healthy
+ sw.lastHealthCheck = checkTime
+
if healthy {
- setHealth(sw, true, checkTime, checkTime)
- } else {
- // do not update lastSeenHealthy
- setHealth(sw, false, sw.lastHealthCheck, checkTime)
+ sw.lastSeenHealthy = checkTime
}
}
@@ -134,13 +137,3 @@ func getHealth(sw *SealWrapper) (healthy bool, lastSeenHealthy time.Time, lastHe
return sw.healthy, sw.lastSeenHealthy, sw.lastHealthCheck
}
-
-// setHealth is the only function allowed to mutate the health fields
-func setHealth(sw *SealWrapper, healthy bool, lastSeenHealthy, lastHealthCheck time.Time) {
- sw.hcLock.Lock()
- defer sw.hcLock.Unlock()
-
- sw.healthy = healthy
- sw.lastSeenHealthy = lastSeenHealthy
- sw.lastHealthCheck = lastHealthCheck
-}
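
The SetHealthy rewrite above folds the deleted setHealth helper into the method while keeping its invariant: lastHealthCheck advances on every check, but lastSeenHealthy only advances when the check succeeded. A standalone sketch of that invariant with a simplified struct, not the real SealWrapper:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// sealHealth is a minimal stand-in for the SealWrapper health fields.
type sealHealth struct {
	mu              sync.Mutex
	healthy         bool
	lastSeenHealthy time.Time
	lastHealthCheck time.Time
}

// SetHealthy records every check time but only moves lastSeenHealthy
// forward on a healthy result, matching the behavior of the patch above.
func (s *sealHealth) SetHealthy(healthy bool, checkTime time.Time) {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.healthy = healthy
	s.lastHealthCheck = checkTime
	if healthy {
		s.lastSeenHealthy = checkTime
	}
}

func main() {
	s := &sealHealth{lastSeenHealthy: time.Now()}
	s.SetHealthy(true, time.Now())
	s.SetHealthy(false, time.Now().Add(time.Minute))
	fmt.Println(s.healthy, s.lastSeenHealthy.Before(s.lastHealthCheck)) // false true
}
```
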
diff --git a/version/VERSION b/version/VERSION
index b8ae5a5b4d68..709c55fe2d7d 100644
--- a/version/VERSION
+++ b/version/VERSION
@@ -1 +1 @@
-1.16.3
\ No newline at end of file
+1.16.4
\ No newline at end of file
diff --git a/website/content/api-docs/auth/aws.mdx b/website/content/api-docs/auth/aws.mdx
index 2a490c150845..b4bb11fa2cb8 100644
--- a/website/content/api-docs/auth/aws.mdx
+++ b/website/content/api-docs/auth/aws.mdx
@@ -203,16 +203,17 @@ This configures the way that Vault interacts with the
### Parameters
- `iam_alias` `(string: "role_id")` - How to generate the identity alias when
- using the `iam` auth method. Valid choices are `role_id`, `unique_id`, and
- `full_arn` When `role_id` is selected, the randomly generated ID of the Vault role
+  using the `iam` auth method. Valid choices are `role_id`, `unique_id`, `canonical_arn`, and
+ `full_arn`. When `role_id` is selected, the randomly generated ID of the Vault role
is used. When `unique_id` is selected, the [IAM Unique
ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers#identifiers-unique-ids)
of the IAM principal (either the user or role) is used as the identity alias
- name. When `full_arn` is selected, the ARN returned by the
- `sts:GetCallerIdentity` call is used as the alias name. This is either
+  name. When `canonical_arn` is selected, the role ARN returned by the `sts:GetCallerIdentity` call
+  will be used. This will be `arn:aws:iam::<account_id>:role/<role_name>`. When `full_arn` is selected,
+ the ARN returned by the `sts:GetCallerIdentity` call is used as the alias name. This is either
`arn:aws:iam:::user/` or
`arn:aws:sts:::assumed-role//`.
- **Note**: if you select `full_arn` and then delete and recreate the IAM role,
+ **Note**: if you select `canonical_arn` or `full_arn` and then delete and recreate the IAM role,
Vault won't be aware and any identity aliases set up for the role name will
still be valid.
diff --git a/website/content/api-docs/auth/cert.mdx b/website/content/api-docs/auth/cert.mdx
index eb6cbb9a03fa..a838e27e63e5 100644
--- a/website/content/api-docs/auth/cert.mdx
+++ b/website/content/api-docs/auth/cert.mdx
@@ -384,7 +384,7 @@ $ curl \
--request POST \
--cacert vault-ca.pem \
--data @payload.json \
- https://127.0.0.1:8200/v1/auth/cert/certs/cert1
+ https://127.0.0.1:8200/v1/auth/cert/config
```
## Login with TLS certificate method
diff --git a/website/content/api-docs/secret/pki.mdx b/website/content/api-docs/secret/pki.mdx
index c199b4f40bca..1329084b17bc 100644
--- a/website/content/api-docs/secret/pki.mdx
+++ b/website/content/api-docs/secret/pki.mdx
@@ -1491,7 +1491,8 @@ $ curl \
This endpoint revokes a certificate using its serial number. This is an
alternative option to the standard method of revoking using Vault lease IDs. A
-successful revocation will rotate the CRL.
+successful revocation rotates the CRL unless `auto_rebuild` is set to `true`
+in [the CRL configuration](#set-revocation-configuration).
~> **Note**: This operation is privileged as it allows revocation of arbitrary
certificates based purely on their serial number. It does not validate that
@@ -1550,7 +1551,8 @@ request is authorized by an appropriate individual (Proof of Possession).
This is an alternative option to the standard method of revoking using Vault
lease IDs or revocation via serial number. A successful revocation will
-rotate the CRL.
+rotate the CRL unless `auto_rebuild` is set to `true`
+in [the CRL configuration](#set-revocation-configuration).
It is not possible to revoke issuers using this path.
@@ -4899,10 +4901,11 @@ $ curl \
```json
{
"data": {
+ "audit_fields": ["common_name", "alt_names", "ip_sans", "uri_sans"],
"authenticators": {
"cert": {
"accessor": "auth_cert_7fe0c1cc",
- "cert_role": ""
+ "cert_role": "est-ca"
},
"userpass": {
"accessor": "auth_userpass_2b333949"
@@ -4910,9 +4913,10 @@ $ curl \
},
"default_mount": true,
"default_path_policy": "sign-verbatim",
+ "enable_sentinel_parsing": true,
"enabled": true,
"label_to_path_policy": {
- "test-label": "roles/est-clients"
+ "test-label": "role:est-clients"
},
"last_updated": "2024-01-31T10:45:22-05:00"
}
@@ -4952,6 +4956,12 @@ updated values as a response along with an updated `last_updated` field.
- `enable_sentinel_parsing` `(bool: false)` - Parse out fields from the provided CSR making them available for
Sentinel policies.
+- `audit_fields` `(list: ["common_name", "alt_names", "ip_sans", "uri_sans"])` - Fields parsed from the CSR that
+  appear in the audit log and can be used by Sentinel policies. Allowed values are `csr`, `common_name`, `alt_names`,
+ `ip_sans`, `uri_sans`, `other_sans`, `signature_bits`, `exclude_cn_from_sans`, `ou`, `organization`, `country`,
+ `locality`, `province`, `street_address`, `postal_code`, `serial_number`, `use_pss`, `key_type`, `key_bits`,
+ `add_basic_constraints`
+
#### Sample Payload
```json
@@ -4970,7 +4980,9 @@ updated values as a response along with an updated `last_updated` field.
"userpass": {
"accessor": "auth_userpass_b2b08fac"
}
- }
+ },
+ "enable_sentinel_parsing": true,
+ "audit_fields": ["common_name", "alt_names", "ip_sans", "uri_sans"]
}
```
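
The revocation notes above hinge on the CRL `auto_rebuild` setting. As a hedged illustration, this sketch flips it with the Vault Go client, assuming a PKI mount at `pki/` and VAULT_ADDR/VAULT_TOKEN set in the environment:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// With auto_rebuild enabled, a successful revocation no longer rotates
	// the CRL immediately; the CRL is rebuilt on a schedule instead.
	if _, err := client.Logical().Write("pki/config/crl", map[string]interface{}{
		"auto_rebuild": true,
	}); err != nil {
		log.Fatal(err)
	}
}
```
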
diff --git a/website/content/api-docs/system/internal-counters.mdx b/website/content/api-docs/system/internal-counters.mdx
index 10fce6ce4128..3aae794ffcbe 100644
--- a/website/content/api-docs/system/internal-counters.mdx
+++ b/website/content/api-docs/system/internal-counters.mdx
@@ -1007,7 +1007,7 @@ The `/sys/internal/counters/config` endpoint is used to configure logging of act
- `enabled` `(string: enable, disable, default)` - Enable or disable counting of client activity. When set to `default`, the client
counts are enabled on Enterprise builds and disabled on community builds. Disabling the feature during the middle of a month will
discard any data recorded for that month, but does not delete previous months.
-- `retention_months` `(integer: 48)` - The number of months of history to retain.
+- `retention_months` `(integer: 48)` - The number of months of history to retain. The minimum is 48 months and the maximum is 60 months.
Any missing parameters are left at their existing value.
diff --git a/website/content/docs/browser-support.mdx b/website/content/docs/browser-support.mdx
index 0d5e36127cc7..f7248e510e81 100644
--- a/website/content/docs/browser-support.mdx
+++ b/website/content/docs/browser-support.mdx
@@ -17,4 +17,6 @@ Vault currently supports all 'evergreen' and updated browsers. the following bro
!> **Warning**: Using an unsupported browser such as Internet Explorer 11 (IE 11) may cause degradation in feature functionality, and in some cases, Vault features may not operate. We encourage using one of the supported browsers listed for Vault UI.
+~> **Note**: Some UI features, such as the [Clipboard](https://developer.mozilla.org/en-US/docs/Web/API/Clipboard), require a [secure context](https://developer.mozilla.org/en-US/docs/Web/Security/Secure_Contexts). They may not function as expected if TLS is not enabled, or for some other reason a secure context is not available.
+
Please note that HashiCorp, in alignment with Microsoft's stance on IE 11, no longer supports Internet Explorer 11 (IE 11). For further information on IE 11, please reference Microsoft's [support site](https://docs.microsoft.com/en-US/lifecycle/faq/internet-explorer-microsoft-edge).
diff --git a/website/content/docs/commands/secrets/enable.mdx b/website/content/docs/commands/secrets/enable.mdx
index 350dc6bc5d6a..d26ab12bcd1b 100644
--- a/website/content/docs/commands/secrets/enable.mdx
+++ b/website/content/docs/commands/secrets/enable.mdx
@@ -90,7 +90,7 @@ flags](/vault/docs/commands) included on all commands.
- `-path` `(string: "")` Place where the secrets engine will be accessible. This
must be unique cross all secrets engines. This defaults to the "type" of the
secrets engine.
-
+
!> **Case-sensitive:** The path where you enable secrets engines is case-sensitive. For
example, the KV secrets engine enabled at `kv/` and `KV/` are treated as two
distinct instances of KV secrets engine.
@@ -105,8 +105,11 @@ flags](/vault/docs/commands) included on all commands.
- `-allowed-managed-keys` `(string: "")` - Managed key name(s) that the mount
in question is allowed to access. Note that multiple keys may be specified
- either by providing the key names as a comma separated string or by providing
- this option multiple times, each time with 1 key.
+ by providing this option multiple times, each time with 1 key.
+
+- `-delegated-auth-accessors` `(string: "")` - An authorized accessor the auth
+ backend can delegate authentication to. To allow multiple accessors, provide
+ the `delegated-auth-accessors` multiple times, each time with 1 accessor.
- `-plugin-version` `(string: "")` - Configures the semantic version of the plugin
to use. If unspecified, implies the built-in or any matching unversioned plugin
diff --git a/website/content/docs/commands/secrets/tune.mdx b/website/content/docs/commands/secrets/tune.mdx
index 4074374888c9..0bb31549f314 100644
--- a/website/content/docs/commands/secrets/tune.mdx
+++ b/website/content/docs/commands/secrets/tune.mdx
@@ -91,8 +91,11 @@ flags](/vault/docs/commands) included on all commands.
- `-allowed-managed-keys` `(string: "")` - Managed key name(s) that the mount
in question is allowed to access. Note that multiple keys may be specified
- either by providing the key names as a comma separated string or by providing
- this option multiple times, each time with 1 key.
+ by providing this option multiple times, each time with 1 key.
+
+- `-delegated-auth-accessors` `(string: "")` - An authorized accessor the auth
+ backend can delegate authentication to. To allow multiple accessors, provide
+ the `delegated-auth-accessors` multiple times, each time with 1 accessor.
- `-plugin-version` `(string: "")` - Configures the semantic version of the plugin
to use. The new version will not start running until the mount is
diff --git a/website/content/docs/commands/transform/import.mdx b/website/content/docs/commands/transform/import.mdx
new file mode 100644
index 000000000000..eb9e848fd8e7
--- /dev/null
+++ b/website/content/docs/commands/transform/import.mdx
@@ -0,0 +1,62 @@
+---
+layout: docs
+page_title: transform import and transform import-version - Command
+description: |-
+ The "transform import" and "transform import-version" commands import the
+ specified key into Transform, via the Transform BYOK mechanism.
+---
+
+# transform import and transform import-version
+
+The `transform import` and `transform import-version` commands import the
+specified key into Transform, via the [Transform BYOK
+mechanism](/vault/docs/secrets/transform#bring-your-own-key-byok). The former
+imports this key as a new key, failing if it already exists, whereas the
+latter will only update an existing key in Transform to a new version of the
+key material.
+
+This command needs access to read the transform mount's wrapping key (at
+`transform/wrapping_key`) and the ability to write to one of the import
+endpoints (either `transform/keys/:name/import` or
+`transform/keys/:name/import_version`).
+
+## Examples
+
+Imports a 2048-bit RSA key as a new key:
+
+```
+$ vault transform import transform/keys/test-key @test-key type=rsa-2048
+Retrieving transform wrapping key.
+Wrapping source key with ephemeral key.
+Encrypting ephemeral key with transform wrapping key.
+Submitting wrapped key to Vault transform.
+Success!
+```
+
+Imports a new version of an existing key:
+
+```
+$ vault transform import-version transform/keys/test-key @test-key-updated
+Retrieving transform wrapping key.
+Wrapping source key with ephemeral key.
+Encrypting ephemeral key with transform wrapping key.
+Submitting wrapped key to Vault transform.
+Success!
+```
+
+## Usage
+
+This command does not have any unique flags and supports the standard set of
+Vault CLI flags. See `vault transform import -help` for more information.
+
+This command requires two positional arguments:
+
+ 1. `PATH`, the path to the transform key to import in the format of
+    `<mount>/keys/<name>`, where `<mount>` is the path to the mount
+    (using `-namespace=<namespace>` to specify any namespaces), and `<name>`
+ is the desired name of the key.
+ 2. `KEY`, the key material to import in Standard Base64 encoding (either
+ of a raw key in the case of symmetric keys such as AES, or of the DER
+ encoded format for asymmetric keys such as RSA). If the value for `KEY`
+ begins with an `@`, the CLI argument is assumed to be a path to a file
+ on disk to be read.
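+
+For example, a sketch of generating a 2048-bit RSA key in DER form, encoding
+it with Base64, and importing it (the key file names and the `openssl`
+workflow are illustrative):
+
+```shell-session
+$ openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -outform DER -out test-key.der
+$ openssl base64 -A -in test-key.der -out test-key
+$ vault transform import transform/keys/test-key @test-key type=rsa-2048
+```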
diff --git a/website/content/docs/commands/transform/index.mdx b/website/content/docs/commands/transform/index.mdx
new file mode 100644
index 000000000000..f4a39af409e8
--- /dev/null
+++ b/website/content/docs/commands/transform/index.mdx
@@ -0,0 +1,32 @@
+---
+layout: docs
+page_title: transform - Command
+description: |-
+ The "transform" command groups subcommands for interacting with Vault's Transform
+ secrets engine.
+---
+
+# transform
+
+The `transform` command groups subcommands for interacting with Vault's
+[Transform Secrets Engine](/vault/docs/secrets/transform).
+
+## Syntax
+
+Option flags for a given subcommand are provided after the subcommand, but before the arguments.
+
+## Examples
+
+To [import](/vault/docs/commands/transform/import) keys into a mount via the
+[Transform BYOK](/vault/docs/secrets/transform#bring-your-own-key-byok)
+mechanism, use the `vault transform import <path> <key>` or
+`vault transform import-version <path> <key>` commands:
+
+```
+$ vault transform import transform/transformations/fpe/example @test-key type=rsa-2048
+Retrieving transform wrapping key.
+Wrapping source key with ephemeral key.
+Encrypting ephemeral key with transform wrapping key.
+Submitting wrapped key.
+Success!
+```
diff --git a/website/content/docs/commands/transit/index.mdx b/website/content/docs/commands/transit/index.mdx
index 72f8291eb07d..60b511968c57 100644
--- a/website/content/docs/commands/transit/index.mdx
+++ b/website/content/docs/commands/transit/index.mdx
@@ -27,6 +27,6 @@ $ vault transit import transit/keys/test-key @test-key type=rsa-2048
Retrieving transit wrapping key.
Wrapping source key with ephemeral key.
Encrypting ephemeral key with transit wrapping key.
-Submitting wrapped key to Vault transit.
+Submitting wrapped key.
Success!
```
diff --git a/website/content/docs/concepts/identity.mdx b/website/content/docs/concepts/identity.mdx
index 8ecb9c015a82..20d7e2eb97cf 100644
--- a/website/content/docs/concepts/identity.mdx
+++ b/website/content/docs/concepts/identity.mdx
@@ -105,7 +105,7 @@ a particular auth mount point.
| ------------------- | --------------------------------------------------------------------------------------------------- |
| AliCloud | Principal ID |
| AppRole | Role ID |
-| AWS IAM | Configurable via `iam_alias` to one of: Role ID (default), IAM unique ID, Full ARN |
+| AWS IAM | Configurable via `iam_alias` to one of: Role ID (default), IAM unique ID, Canonical ARN, Full ARN |
| AWS EC2 | Configurable via `ec2_alias` to one of: Role ID (default), EC2 instance ID, AMI ID |
| Azure | Subject (from JWT claim) |
| Cloud Foundry | App ID |
diff --git a/website/content/docs/concepts/seal.mdx b/website/content/docs/concepts/seal.mdx
index 6c44a8f3bc8b..28f5fe4929bc 100644
--- a/website/content/docs/concepts/seal.mdx
+++ b/website/content/docs/concepts/seal.mdx
@@ -213,8 +213,6 @@ versions as well.
### Migration post Vault 1.16.0 via Seal HA for Auto Seals (Enterprise)
-@include 'alerts/beta.mdx'
-
With Seal HA, migration between auto-unseal types (not including any Shamir
seals) can be done fully online using Seal High Availability (Seal HA) without
any downtime.
@@ -337,10 +335,8 @@ migration will not happen again on the peer nodes.
## Seal High Availability (Enterprise)
-@include 'alerts/beta.mdx'
-
-Seal High Availability (Seal HA) allows the configuration of more than one auto
-seal mechanism such that Vault can tolerate the temporary loss of a seal service
+Seal high availability (Seal HA) allows the configuration of more than one auto
+seal mechanism such that Vault can tolerate the temporary loss of a seal service
or device for a time. With Seal HA configured with at least two and no more than
three auto seals, Vault can also start up and unseal if one of the
configured seals is still available (though Vault will remain in a degraded mode in
diff --git a/website/content/docs/configuration/listener/tcp.mdx b/website/content/docs/configuration/listener/tcp/index.mdx
similarity index 91%
rename from website/content/docs/configuration/listener/tcp.mdx
rename to website/content/docs/configuration/listener/tcp/index.mdx
index 2a19e702378c..3617a7be7e2f 100644
--- a/website/content/docs/configuration/listener/tcp.mdx
+++ b/website/content/docs/configuration/listener/tcp/index.mdx
@@ -1,7 +1,7 @@
---
layout: docs
page_title: TCP - Listeners - Configuration
-description: |-
+description: >-
The TCP listener configures Vault to listen on the specified TCP address and
port.
---
@@ -47,6 +47,44 @@ also omit keys from the response when the corresponding value is empty (`""`).
settings will apply to CLI and UI output in addition to direct API calls.
+## Default TLS configuration
+
+By default, Vault TCP listeners only accept TLS 1.2 or 1.3 connections and will
+drop connection requests from clients using TLS 1.0 or 1.1.
+
+Vault uses the following ciphersuites by default:
+
+- **TLS 1.3** - `TLS_AES_128_GCM_SHA256`, `TLS_AES_256_GCM_SHA384`, or `TLS_CHACHA20_POLY1305_SHA256`.
+- **TLS 1.2** - depends on whether you configure Vault with a RSA or ECDSA certificate.
+
+You can configure Vault with any cipher supported by the
+[`tls`](https://pkg.go.dev/crypto/tls) and
+[`tlsutil`](https://github.com/hashicorp/go-secure-stdlib/blob/main/tlsutil/tlsutil.go#L31-L57)
+Go packages. Vault uses the `tlsutil` package to parse ciphersuite configurations.
+
+
+
+  The Go team and HashiCorp believe that the set of ciphers supported by `tls`
+ and `tlsutil` is appropriate for modern, secure usage. However, some
+ vulnerability scanners may flag issues with your configuration.
+
+ In particular, Sweet32 (CVE-2016-2183) is an attack against 64-bit block size
+ ciphers including 3DES that may allow an attacker to break the encryption of
+  long-lived connections. According to the
+  [vulnerability disclosure](https://sweet32.info/), breaking the encryption
+  required a single HTTPS session with 785 GB of traffic.
+
+ As of May 2024, the Go team does not believe the risk of Sweet32 is sufficient
+  to remove existing client compatibility by deprecating 3DES support. However,
+ the team did [de-prioritize 3DES](https://github.com/golang/go/issues/45430)
+ in favor of AES-based ciphers.
+
+
+
+Before overriding Vault defaults, we recommend reviewing the Go team's recommended
+[approach to TLS configuration](https://go.dev/blog/tls-cipher-suites) with
+particular attention to their ciphersuite selections.
+
## Listener's custom response headers
As of version 1.9, Vault supports defining custom HTTP response headers for the root path (`/`) and also on API endpoints (`/v1/*`).
@@ -119,7 +157,7 @@ default value in the `"/sys/config/ui"` [API endpoint](/vault/api-docs/system/co
request size, in bytes. Defaults to 32 MB if not set or set to `0`.
Specifying a number less than `0` turns off limiting altogether.
-- `max_request_duration` `(string: "90s")` – Specifies the maximum
+- `max_request_duration` `(string: "90s")` – Specifies the maximum
request duration allowed before Vault cancels the request. This overrides
`default_max_request_duration` for this listener.
@@ -148,7 +186,8 @@ default value in the `"/sys/config/ui"` [API endpoint](/vault/api-docs/system/co
- `tls_disable` `(string: "false")` – Specifies if TLS will be disabled. Vault
assumes TLS by default, so you must explicitly disable TLS to opt-in to
- insecure communication.
+ insecure communication. Disabling TLS can **disable** some UI functionality. See
+ the [Browser Support](/vault/docs/browser-support) page for more details.
- `tls_cert_file` `(string: <required-if-enabled>, reloads-on-SIGHUP)` –
Specifies the path to the certificate for TLS. It requires a PEM-encoded file.
@@ -176,7 +215,7 @@ default value in the `"/sys/config/ui"` [API endpoint](/vault/api-docs/system/co
- `tls_max_version` `(string: "tls13")` – Specifies the maximum supported
version of TLS. Accepted values are "tls10", "tls11", "tls12" or "tls13".
-~> **Warning**: TLS 1.1 and lower (`tls10` and `tls11` values for the
+ ~> **Warning**: TLS 1.1 and lower (`tls10` and `tls11` values for the
`tls_min_version` and `tls_max_version` parameters) are widely considered
insecure.
@@ -282,6 +321,7 @@ This example shows enabling a TLS listener.
```hcl
listener "tcp" {
+ address = "127.0.0.1:8200"
tls_cert_file = "/etc/certs/vault.crt"
tls_key_file = "/etc/certs/vault.key"
}
@@ -545,4 +585,4 @@ Raft Applied Index 219
[golang-tls]: https://golang.org/src/crypto/tls/cipher_suites.go
[api-addr]: /vault/docs/configuration#api_addr
[cluster-addr]: /vault/docs/configuration#cluster_addr
-[go-tls-blog]: https://go.dev/blog/tls-cipher-suites
+[go-tls-blog]: https://go.dev/blog/tls-cipher-suites
\ No newline at end of file
diff --git a/website/content/docs/configuration/listener/tcp/tcp-tls.mdx b/website/content/docs/configuration/listener/tcp/tcp-tls.mdx
new file mode 100644
index 000000000000..90b356bec14e
--- /dev/null
+++ b/website/content/docs/configuration/listener/tcp/tcp-tls.mdx
@@ -0,0 +1,208 @@
+---
+layout: docs
+page_title: Configure TLS for your Vault TCP listener
+description: >-
+ Example TCP listener configuration with TLS encryption.
+---
+
+# Configure TLS for your Vault TCP listener
+
+You can configure your TCP listener to use specific versions of TLS and specific
+ciphersuites.
+
+## Assumptions
+
+- **Your Vault instance is not currently running**. If your Vault cluster is
+ running, you must
+ [restart the cluster gracefully](https://support.hashicorp.com/hc/en-us/articles/17169701076371-A-Step-by-Step-Guide-to-Restarting-a-Vault-Cluster)
+  to apply changes to your TCP listener. SIGHUP will not reload your TLS
+ configuration.
+- **You have a valid TLS certificate file**.
+- **You have a valid TLS key file**.
+- **You have a valid CA file (if required)**.
+
+## Example TLS 1.3 configuration
+
+If a reasonably modern set of clients connects to your Vault instance, you
+can configure the `tcp` listener stanza to only accept TLS 1.3 with the
+`tls_min_version` parameter:
+
+
+
+```plaintext
+listener "tcp" {
+ address = "127.0.0.1:8200"
+ tls_cert_file = "cert.pem"
+ tls_key_file = "key.pem"
+ tls_min_version = "tls13"
+}
+```
+
+
+
+Vault does not accept explicit ciphersuite configuration for TLS 1.3 because the
+Go team has already designated a select set of ciphers that align with the
+broadly-accepted Mozilla Security/Server Side TLS guidance for [modern TLS
+configuration](https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility).
+
+## Example TLS 1.2 configuration
+
+To use TLS 1.2 with a non-default set of ciphersuites, you can set 1.2 as the
+minimum and maximum allowed TLS version and explicitly define your preferred
+ciphersuites with `tls_cipher_suites` and one or more of the ciphersuite
+constants from the ciphersuite configuration parser. For example:
+
+
+
+```plaintext
+listener "tcp" {
+ address = "127.0.0.1:8200"
+ tls_cert_file = "cert.pem"
+ tls_key_file = "key.pem"
+ tls_min_version = "tls12"
+ tls_max_version = "tls12"
+ tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"
+}
+```
+
+
+
+You must set the minimum and maximum TLS version to disable TLS 1.3, which does
+not support explicit cipher selection. The priority order of the ciphersuites
+in `tls_cipher_suites` is determined by the `tls` Go package.
+
+
+
+ The TLS 1.2 configuration example excludes any 3DES ciphers to avoid potential
+ exposure to the Sweet32 attack (CVE-2016-2183). You should customize the
+ ciphersuite list as needed to meet your environment-specific security
+ requirements.
+
+
+
+## Verify your TLS configuration
+
+You can verify your TLS configuration using an SSL scanner such as
+[`sslscan`](https://github.com/rbsec/sslscan).
+
+
+
+
+
+
+```shell-session
+$ sslscan 127.0.0.1:8200
+Version: 2.1.3
+OpenSSL 3.2.1 30 Jan 2024
+
+Connected to 127.0.0.1
+
+Testing SSL server 127.0.0.1 on port 8200 using SNI name 127.0.0.1
+
+ SSL/TLS Protocols:
+SSLv2 disabled
+SSLv3 disabled
+TLSv1.0 disabled
+TLSv1.1 disabled
+TLSv1.2 enabled
+TLSv1.3 enabled
+
+ TLS Fallback SCSV:
+Server supports TLS Fallback SCSV
+
+ TLS renegotiation:
+Session renegotiation not supported
+
+ TLS Compression:
+Compression disabled
+
+ Heartbleed:
+TLSv1.3 not vulnerable to heartbleed
+TLSv1.2 not vulnerable to heartbleed
+
+ Supported Server Cipher(s):
+Preferred TLSv1.3 128 bits TLS_AES_128_GCM_SHA256 Curve 25519 DHE 253
+Accepted TLSv1.3 256 bits TLS_AES_256_GCM_SHA384 Curve 25519 DHE 253
+Accepted TLSv1.3 256 bits TLS_CHACHA20_POLY1305_SHA256 Curve 25519 DHE 253
+Preferred TLSv1.2 128 bits ECDHE-ECDSA-AES128-GCM-SHA256 Curve 25519 DHE 253
+Accepted TLSv1.2 256 bits ECDHE-ECDSA-AES256-GCM-SHA384 Curve 25519 DHE 253
+Accepted TLSv1.2 256 bits ECDHE-ECDSA-CHACHA20-POLY1305 Curve 25519 DHE 253
+Accepted TLSv1.2 128 bits ECDHE-ECDSA-AES128-SHA Curve 25519 DHE 253
+Accepted TLSv1.2 256 bits ECDHE-ECDSA-AES256-SHA Curve 25519 DHE 253
+
+ Server Key Exchange Group(s):
+TLSv1.3 128 bits secp256r1 (NIST P-256)
+TLSv1.3 192 bits secp384r1 (NIST P-384)
+TLSv1.3 260 bits secp521r1 (NIST P-521)
+TLSv1.3 128 bits x25519
+TLSv1.2 128 bits secp256r1 (NIST P-256)
+TLSv1.2 192 bits secp384r1 (NIST P-384)
+TLSv1.2 260 bits secp521r1 (NIST P-521)
+TLSv1.2 128 bits x25519
+
+ SSL Certificate:
+Signature Algorithm: ecdsa-with-SHA256
+ECC Curve Name: prime256v1
+ECC Key Strength: 128
+
+Subject: localhost
+Issuer: localhost
+
+Not valid before: May 17 17:27:29 2024 GMT
+Not valid after: Jun 16 17:27:29 2024 GMT
+```
+
+
+
+
+
+
+
+
+```shell-session
+$ sslscan 127.0.0.1:8200
+Testing SSL server 127.0.0.1 on port 8200 using SNI name 127.0.0.1
+
+ SSL/TLS Protocols:
+SSLv2 disabled
+SSLv3 disabled
+TLSv1.0 disabled
+TLSv1.1 disabled
+TLSv1.2 enabled
+TLSv1.3 enabled
+
+ Supported Server Cipher(s):
+Preferred TLSv1.3 128 bits TLS_AES_128_GCM_SHA256 Curve 25519 DHE 253
+Accepted TLSv1.3 256 bits TLS_AES_256_GCM_SHA384 Curve 25519 DHE 253
+Accepted TLSv1.3 256 bits TLS_CHACHA20_POLY1305_SHA256 Curve 25519 DHE 253
+Preferred TLSv1.2 128 bits ECDHE-RSA-AES128-GCM-SHA256 Curve 25519 DHE 253
+Accepted TLSv1.2 256 bits ECDHE-RSA-AES256-GCM-SHA384 Curve 25519 DHE 253
+Accepted TLSv1.2 256 bits ECDHE-RSA-CHACHA20-POLY1305 Curve 25519 DHE 253
+Accepted TLSv1.2 128 bits ECDHE-RSA-AES128-SHA Curve 25519 DHE 253
+Accepted TLSv1.2 256 bits ECDHE-RSA-AES256-SHA Curve 25519 DHE 253
+Accepted TLSv1.2 128 bits AES128-GCM-SHA256
+Accepted TLSv1.2 256 bits AES256-GCM-SHA384
+Accepted TLSv1.2 128 bits AES128-SHA
+Accepted TLSv1.2 256 bits AES256-SHA
+Accepted TLSv1.2 112 bits TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
+Accepted TLSv1.2 112 bits TLS_RSA_WITH_3DES_EDE_CBC_SHA
+
+ Server Key Exchange Group(s):
+TLSv1.3 128 bits secp256r1 (NIST P-256)
+TLSv1.3 192 bits secp384r1 (NIST P-384)
+TLSv1.3 260 bits secp521r1 (NIST P-521)
+TLSv1.3 128 bits x25519
+TLSv1.2 128 bits secp256r1 (NIST P-256)
+TLSv1.2 192 bits secp384r1 (NIST P-384)
+TLSv1.2 260 bits secp521r1 (NIST P-521)
+TLSv1.2 128 bits x25519
+
+ SSL Certificate:
+Signature Algorithm: sha256WithRSAEncryption
+RSA Key Strength: 4096
+```
+
+
+
+
+
\ No newline at end of file
diff --git a/website/content/docs/configuration/programmatic-best-practices.mdx b/website/content/docs/configuration/programmatic-best-practices.mdx
new file mode 100644
index 000000000000..44eef8976788
--- /dev/null
+++ b/website/content/docs/configuration/programmatic-best-practices.mdx
@@ -0,0 +1,89 @@
+---
+layout: docs
+page_title: Programmatic best practices
+description: >-
+ Best practices for managing Vault programmatically.
+---
+
+# Best practices for programmatic Vault management
+
+It can be a challenge to enforce appropriate governance policies on resources
+created directly from the Vault CLI or API. The best way to manage Vault at
+scale is to
+[codify resource management](/vault/tutorials/operations/codify-mgmt-vault-terraform)
+with [Terraform](/terraform) and the
+[Terraform Vault provider](https://registry.terraform.io/providers/hashicorp/vault/latest).
+
+Terraform applies policy and governance using infrastructure as code (IaC) so
+you can programmatically manage Vault resources such as authentication methods,
+plugins, namespaces, and policies. For example, if you have specific ACL or
+Sentinel policies that must apply to every Vault namespace, you can rely on
+Terraform to apply the correct governance policies every time you create a new
+namespace.
+
+## Terraform recommendations
+
+- **Avoid reading or writing long-lived static secrets to Vault from Terraform**.
+  Any data that Terraform reads from, or writes to, Vault persists in the
+  Terraform state file and any generated plan files.
+
+- **Encrypt the Terraform state file**. Protect the state file with a secure,
+  encrypted backend, as shown in the sketch after this list.
+
+- **Follow the principle of least privilege**. Limit who has read/write access
+ to your Terraform state file.
+
+- **Limit direct management of Vault resources**. Use
+ [Sentinel policies](https://go.hashi.co/support-block-ns-manipulation-with-sentinel)
+ to limit resource management permissions for resources that should be managed
+ through Terraform.
+
+- **Use short-lived credentials**. Credentials persist in the Terraform state
+ file. Short-lived credentials reduce the risk of exposure in the event of a
+ compromised state file.
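+
+A minimal sketch of an encrypted remote state configuration, assuming an S3
+backend with server-side encryption (the bucket, key, region, and KMS alias
+are placeholders):
+
+```hcl
+terraform {
+  backend "s3" {
+    bucket     = "tf-state-example"
+    key        = "vault/terraform.tfstate"
+    region     = "us-east-1"
+    encrypt    = true
+    kms_key_id = "alias/terraform-state"
+  }
+}
+```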
+
+## Vault recommendations
+
+- **Use Vault-backed dynamic credentials for different cloud providers**.
+ [Vault-backed dynamic credentials](/terraform/cloud-docs/workspaces/dynamic-provider-credentials/vault-backed)
+  limit the need for unique dynamic provider credentials across different cloud
+ providers so you can centralize sensitive data management with Vault while
+ generating short-lived credentials with multiple cloud providers.
+
+- **Use the namespace attribute in resources and data sources**. Using the
+ namespace attribute of a resource, instead of a provider alias, simplifies
+ your configuration and avoids the need for multiple provider blocks.
+
+- **Use distinct token capabilities**. Use the minimal required Vault token
+ capabilities to manage your Vault resources. For example, reading data from a
+ KV data source and drift detection functionality only require the `read`
+ capability. But managing the KV resource requires the `create` or `update`
+ capability depending on whether the resource already exists. And removing the
+ KV resource from the Terraform configuration requires the `delete` capability.
+
+- **Migrate existing resources to Terraform**. If you created Vault resources
+ outside of your Terraform provisioning workflow,
+ [migrate the un-managed resources](/vault/docs/configuration/programmatic-management#migrate).
+
+- **Use dynamic provider credentials for Vault when possible**.
+ [Dynamic provider credentials](/terraform/cloud-docs/workspaces/dynamic-provider-credentials/vault-configuration)
+ generate short-lived credentials as needed, which limits the need for static
+ credentials and improves the security posture of your integration.
+
+- **Do not pass `address`, `token`, or `namespace` to the provider configuration block**.
+  When you use dynamic provider credentials, Terraform populates the
+  `TFC_VAULT_ADDR` environment variable with `address` and the
+  `TFC_VAULT_NAMESPACE` workspace environment variable with `namespace`.
+  Terraform then uses the environment variables to retrieve a value for `token`.
+
+- **Do not hardcode Vault credentials**. If you cannot use dynamic provider
+  credentials, configure the Vault provider with environment variables.
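+
+  A minimal sketch of configuring the provider through environment variables
+  instead of hardcoded arguments (the address, namespace, and login method are
+  placeholders):
+
+  ```shell-session
+  $ export VAULT_ADDR="https://vault.example.com:8200"
+  $ export VAULT_NAMESPACE="admin"
+  $ export VAULT_TOKEN="$(vault login -token-only -method=oidc)"
+  $ terraform plan
+  ```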
+
+
+
+ If you use dynamic credentials with the Terraform Vault provider, it means that
+ Terraform manages the Vault token lifecycle. As a result, Vault **does not**
+ create a child token and you cannot use provider arguments for managing the
+ Vault child token (e.g., `token_name` argument).
+
+
diff --git a/website/content/docs/configuration/programmatic-management.mdx b/website/content/docs/configuration/programmatic-management.mdx
new file mode 100644
index 000000000000..a04eb8b14c3e
--- /dev/null
+++ b/website/content/docs/configuration/programmatic-management.mdx
@@ -0,0 +1,463 @@
+---
+layout: docs
+page_title: Manage Vault resources programmatically
+description: >-
+ Step-by-step instructions for managing Vault resources programmatically with
+ Terraform
+---
+
+# Manage Vault resources programmatically with Terraform
+
+Use Terraform to manage policies, namespaces, and plugins in Vault.
+
+## Before you start
+
+- **You must have [Terraform installed](/terraform/install)**.
+- **You must have the [Terraform Vault provider](https://registry.terraform.io/providers/hashicorp/vault/latest) configured**.
+- **You must have admin access to your Terraform installation**. If you do not
+ have admin access, you can still generate the relevant configuration files,
+ but you will need to have someone else apply the changes.
+- **You must have a [Vault server running](/vault/tutorials/getting-started/getting-started-dev-server)**.
+
+## Step 1: Create a resource file for namespaces
+
+The Terraform Vault provider supports a `vault_namespace` resource type for
+managing Vault namespaces:
+
+```hcl
+resource "vault_namespace" "" {
+ path = ""
+}
+```
+
+To manage your Vault namespaces in Terraform:
+
+1. Use the `vault namespace list` command to identify any unmanaged namespaces
+ that you need to migrate. For example:
+
+ ```shell-session
+ $ vault namespace list
+
+ Keys
+ ----
+ admin/
+ ```
+
+1. Create a new Terraform Vault Provider resource file called
+ `vault_namespaces.tf` that defines `vault_namespace` resources for each of
+ the new or existing namespaces resources you want to manage.
+
+ For example, to migrate the `admin` namespace in the example and create a new
+ `dev` namespace:
+
+ ```hcl
+ resource "vault_namespace" "admin_ns" {
+ path = "admin"
+ }
+
+ resource "vault_namespace" "dev_ns" {
+ path = "dev"
+ }
+ ```
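+
+   Since the `admin` namespace already exists in Vault, you can bring it under
+   Terraform management with `terraform import` (a sketch, assuming the
+   resource name above):
+
+   ```shell-session
+   $ terraform import vault_namespace.admin_ns admin
+   ```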
+
+## Step 2: Create a resource file for secret engines
+
+Terraform Vault provider supports discrete types for the different
+[auth](https://registry.terraform.io/providers/hashicorp/vault/latest/docs#vault-authentication-configuration-options),
+[secret](https://registry.terraform.io/providers/hashicorp/vault/latest/docs/resources/mount),
+and [database](https://registry.terraform.io/providers/hashicorp/vault/latest/docs/resources/database_secrets_mount)
+plugin types in Vault.
+
+To migrate a secret engine, use the `vault_mount` resource type:
+
+```hcl
+resource "vault_mount" "" {
+ path = ""
+ type = ""
+}
+```
+
+To manage your Vault secret engines in Terraform:
+
+1. Use the `vault secrets list` command to identify any unmanaged secret engines
+ that you need to migrate. For example:
+
+ ```shell-session
+ $ vault secrets list | grep -vEw '(cubbyhole|identity|sys)'
+
+ Path Type Accessor Description
+ ---- ---- -------- -----------
+ transit/ transit transit_8291b949 n/a
+ ```
+
+1. Use the `-namespace` flag to check for unmanaged secret engines under any
+ namespaces you identified in the previous step. For example, to check for
+ secret engines under the `admin` namespace:
+
+ ```shell-session
+ $ vault secrets list -namespace=admin | grep -vEw '(cubbyhole|identity|sys)'
+
+ Path Type Accessor Description
+ ---- ---- -------- -----------
+ admin_keys/ kv kv_87edfc65 n/a
+ ```
+
+1. Create a new Terraform Vault Provider resource file called `vault_secrets.tf`
+ that defines `vault_mount` resources for each of the new or existing secret
+ engines you want to manage.
+
+ For example, to migrate the `transit` and `admin_keys` secret engines in the
+ example and enable a new `kv` engine under the new `dev` namespace called
+ `dev_keys`:
+
+ ```hcl
+ resource "vault_mount" "transit_plugin" {
+ path = "transit"
+ type = "transit"
+ }
+
+ resource "vault_mount" "admin_keys_plugin" {
+ namespace = vault_namespace.admin_ns.path
+ path = "admin_keys"
+ type = "kv"
+ options = {
+ version = "2"
+ }
+ }
+
+ resource "vault_mount" "dev_keys_plugin" {
+ namespace = vault_namespace.dev_ns.path
+ path = "dev_keys"
+ type = "kv"
+ options = {
+ version = "2"
+ }
+ }
+ ```
+
+## Step 3: Create a resource file for policies
+
+The Terraform Vault provider supports a `vault_policy` resource type for
+managing Vault policies:
+
+```hcl
+resource "vault_policy" "" {
+ name = ""
+ policy = <
+ EOT
+}
+```
+
+To manage your Vault policies in Terraform:
+
+1. Use the `vault policy list` command to identify any unmanaged policies that
+ you need to migrate. For example:
+
+ ```shell-session
+ $ vault policy list | grep -vEw 'root'
+
+ default
+ ```
+
+1. Create a Terraform Vault Provider resource file called `vault_policies.tf`
+   that defines `vault_policy` resources for each policy resource you want to
+ manage in Terraform. You can use the following `bash` code to write all
+ your existing, non-root policies to the file:
+
+ ```shell-session
+ for vpolicy in $(vault policy list | grep -vw root) ; do
+ echo "resource \"vault_policy\" \"vault_$vpolicy\" {"
+ echo " name = \"$vpolicy\""
+     echo "  policy = <<EOT"
+     vault policy read "$vpolicy"
+     echo "EOT"
+     echo "}"
+   done > vault_policies.tf
+ ```
+
+1. Update the `vault_policies.tf` file with any new policies you want to add.
+ For example, to create a policy for the example `dev_keys` secret engine:
+
+ ```hcl
+ resource "vault_policy" "dev_team_policy" {
+ name = "dev_team"
+
+     policy = <<-EOT
+       path "dev_keys/*" {
+         capabilities = ["create", "read", "update", "delete", "list"]
+       }
+     EOT
+   }
+   ```
diff --git a/website/content/docs/enterprise/cluster-design.mdx b/website/content/docs/enterprise/cluster-design.mdx
new file mode 100644
--- /dev/null
+++ b/website/content/docs/enterprise/cluster-design.mdx
+---
+layout: docs
+page_title: Design your Vault Enterprise cluster
+description: >-
+  Instructions and best practices for cluster design with Vault Enterprise.
+---
+
+# Design your Vault Enterprise cluster
+
+@include 'alerts/enterprise-only.mdx'
+
+If you want to implement a robust Vault cluster, you need to understand how
+Vault manages clusters, storage, and replication.
+
+## Before you start
+
+- **Complete the [HashiCorp Enterprise Academy Onboarding](https://onboarding.hashicorp.com/trainings) for Vault**.
+- **Watch the [Raft consensus demo](https://thesecretlivesofdata.com/raft/)**.
+
+## Step 1: Plan your cluster architecture
+
+1. Review the [Integrated storage](/vault/docs/concepts/integrated-storage)
+ overview to learn the basics about Vault integrated storage.
+1. Review the [Vault multi-cluster architecture guide](/well-architected-framework/zero-trust-security/multi-cluster-architecture)
+ to learn the best practices for running multiple Vault clusters.
+1. Review the [Vault Enterprise replication overview](/vault/docs/enterprise/replication)
+ to learn the differences between performance replication and disaster
+ recovery replication.
+1. Review the [Vault with integrated storage reference architecture](/well-architected-framework/zero-trust-security/raft-reference-architecture)
+ guide to learn the best practices for using Vault integrated storage
+ in a zero-trust security posture.
+
+## Step 2: Review anti-patterns for Vault configuration
+
+You can help keep your Vault environments healthy by avoiding established
+anti-patterns.
+
+The HashiCorp Well-Architected Framework documentation provides in-depth
+[Vault anti-patterns](/well-architected-framework/operational-excellence/security-vault-anti-patterns) guidance based on
+lessons learned by customers operating Vault in the field.
+
+## Step 3: Plan for maintenance at scale
+
+The easiest way to run Vault at scale is to manage Vault programmatically with
+Terraform and Sentinel.
+
+1. Review the [Terraform intro](/terraform/intro).
+1. Review the [Programmatic best practices](/vault/docs/configuration/programmatic-best-practices)
+ guide to learn about managing Vault through Terraform.
+1. Review the [Vault namespaces recommendations](/vault/tutorials/enterprise/namespace-structure#use-namespaces-sparingly).
\ No newline at end of file
diff --git a/website/content/docs/enterprise/mfa/index.mdx b/website/content/docs/enterprise/mfa/index.mdx
index 0c3916b706c7..27daca8111f0 100644
--- a/website/content/docs/enterprise/mfa/index.mdx
+++ b/website/content/docs/enterprise/mfa/index.mdx
@@ -18,22 +18,22 @@ Vault.
MFA in Vault can be of the following types.
-- `Time-based One-time Password (TOTP)` - If configured and enabled on a path,
+- **Time-based One-time Password (TOTP)** - If configured and enabled on a path,
this would require a TOTP passcode, along with the Vault token, to be presented
while invoking the API request. The passcode will be validated against the
TOTP key present in the identity of the caller in Vault.
-- `Okta` - If Okta push is configured and enabled on a path, then the enrolled
+- **Okta** - If Okta push is configured and enabled on a path, then the enrolled
device of the user will get a push notification to approve or deny the access
to the API. The Okta username will be derived from the caller identity's
alias.
-- `Duo` - If Duo push is configured and enabled on a path, then the enrolled
+- **Duo** - If Duo push is configured and enabled on a path, then the enrolled
device of the user will get a push notification to approve or deny the access
to the API. The Duo username will be derived from the caller identity's
alias.
-- `PingID` - If PingID push is configured and enabled on a path, then the
+- **PingID** - If PingID push is configured and enabled on a path, then the
enrolled device of the user will get a push notification to approve or deny
the access to the API. The PingID username will be derived from the caller
identity's alias.
@@ -110,6 +110,13 @@ $ curl \
http://127.0.0.1:8200/v1/secret/foo
```
-### API
+## API
MFA can be managed entirely over the HTTP API. Please see [MFA API](/vault/api-docs/system/mfa) for more details.
+
+## Additional resources
+
+- [Duo MFA documentation](/vault/docs/enterprise/mfa/mfa-duo)
+- [Okta MFA documentation](/vault/docs/enterprise/mfa/mfa-okta)
+- [PingID MFA documentation](/vault/docs/enterprise/mfa/mfa-pingid)
+- [TOTP MFA documentation](/vault/docs/enterprise/mfa/mfa-totp)
diff --git a/website/content/docs/enterprise/pkcs11-provider/index.mdx b/website/content/docs/enterprise/pkcs11-provider/index.mdx
index 199b4774be39..40fd82fc02aa 100644
--- a/website/content/docs/enterprise/pkcs11-provider/index.mdx
+++ b/website/content/docs/enterprise/pkcs11-provider/index.mdx
@@ -11,8 +11,8 @@ description: |-
@include 'alerts/enterprise-only.mdx'
--> **Note**: This feature is part of the [KMIP Secret Engine](/vault/docs/secrets/kmip), which requires [Vault Enterprise](https://www.hashicorp.com/products/vault/)
-with the Advanced Data Protection Module.
+The PKCS11 provider is part of the [KMIP Secret Engine](/vault/docs/secrets/kmip), which requires [Vault Enterprise](https://www.hashicorp.com/products/vault/pricing)
+with the Advanced Data Protection (ADP) module.
[PKCS#11](http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html)
is an open standard C API that provides a means to access cryptographic capabilities on a device.
diff --git a/website/content/docs/enterprise/sentinel/properties.mdx b/website/content/docs/enterprise/sentinel/properties.mdx
index b40582bb79da..6dd3b208e679 100644
--- a/website/content/docs/enterprise/sentinel/properties.mdx
+++ b/website/content/docs/enterprise/sentinel/properties.mdx
@@ -42,12 +42,12 @@ The following properties are available in the `request` namespace.
### Replication properties
-The following properties exists at the `replication.mode` namespace.
+The following properties exist in the `replication` namespace.
| Name | Type | Description |
| :------------ | :------- | :------------------------------------------------------------------------------------------------------------- |
-| `dr` | `string` | The state of DR replication. Valid values are "disabled", "bootstrapping", "primary", and "secondary" |
-| `replication` | `string` | The state of performance replication. Valid values are "disabled", "bootstrapping", "primary", and "secondary" |
+| `dr.mode` | `string` | The state of DR replication. Valid values are "disabled", "bootstrapping", "primary", and "secondary" |
+| `performance.mode` | `string` | The state of performance replication. Valid values are "disabled", "bootstrapping", "primary", and "secondary" |
## Token properties
diff --git a/website/content/docs/internals/limits.mdx b/website/content/docs/internals/limits.mdx
index c8abc9e21a46..02fcbc5eed07 100644
--- a/website/content/docs/internals/limits.mdx
+++ b/website/content/docs/internals/limits.mdx
@@ -195,7 +195,7 @@ This limit depends on the key size.
| ecdsa-p521 keys | 539 | 1078 |
| 1024-bit RSA keys | 169 | 333 |
| 2048-bit RSA keys | 116 | 233 |
-| 4096-bit RSA kyes | 89 | 178 |
+| 4096-bit RSA keys | 89 | 178 |
## Other limits
diff --git a/website/content/docs/internals/recommended-patterns.mdx b/website/content/docs/internals/recommended-patterns.mdx
new file mode 100644
index 000000000000..711ad73dc9a3
--- /dev/null
+++ b/website/content/docs/internals/recommended-patterns.mdx
@@ -0,0 +1,287 @@
+---
+layout: docs
+page_title: Recommended patterns
+description: Follow these recommended patterns to effectively operate Vault.
+---
+
+# Recommended patterns
+
+Help keep your Vault environments operating effectively by implementing the following best practices so you avoid common anti-patterns.
+
+| Description | Applicable Vault edition |
+|--- |--- |
+| [Adjust the default lease time](#adjust-the-default-lease-time) | All |
+| [Use identity entities for accurate client count](#use-identity-entities-for-accurate-client-count) | Enterprise, HCP |
+| [Increase IOPS](#increase-iops) | Enterprise, Community |
+| [Enable disaster recovery](#enable-disaster-recovery) | Enterprise |
+| [Test disaster recovery](#test-disaster-recovery) | Enterprise |
+| [Improve upgrade cadence](#improve-upgrade-cadence) | Enterprise, Community |
+| [Test before upgrades](#test-before-upgrades) | Enterprise, Community |
+| [Rotate audit device logs](#rotate-audit-device-logs) | Enterprise, Community |
+| [Monitor metrics](#monitor-metrics) | Enterprise, Community |
+| [Establish usage baseline](#establish-usage-baseline) | Enterprise, Community |
+| [Minimize root token use](#minimize-root-token-use) | All |
+| [Rekey when necessary](#rekey-when-necessary) | All |
+
+## Adjust the default lease time
+
+The default lease time in Vault is 32 days or 768 hours. This time allows for some operations, such as re-authentication or renewal.
+See [lease](/vault/docs/concepts/lease) documentation for more information.
+
+**Recommended pattern:**
+
+You should tune the lease TTL value for your needs. Vault holds leases in memory until the lease expires.
+We recommend keeping TTLs as short as the use case will allow.
+- [Auth tune](/vault/docs/commands/auth/tune)
+- [Secrets tune](/vault/docs/commands/secrets/tune)
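+
+For example, a sketch of shortening the default and maximum lease TTL on an
+existing auth mount (the mount path and TTL values are placeholders):
+
+```shell-session
+$ vault auth tune -default-lease-ttl=1h -max-lease-ttl=24h approle/
+```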
+
+
+Tuning or adjusting TTLs does not retroactively affect tokens that were issued. New tokens must be issued after tuning TTLs.
+
+
+**Anti-pattern issue:**
+
+If you create leases without changing the default time-to-live (TTL), leases will live in Vault until the default lease time is up.
+Depending on your infrastructure and available system memory, using the default or long TTL may cause performance issues as Vault stores
+leases in memory.
+
+## Use identity entities for accurate client count
+
+Each Vault client may have multiple accounts with the auth methods enabled on the Vault server.
+
+![Entity](/img/vault-entity-waf1.png)
+
+**Recommended pattern:**
+
+Since each token adds to the client count, and each unique authentication issues a token, you should use identity entities to create aliases that connect each login to a single identity.
+
+ - [Client count](/vault/docs/concepts/client-count)
+ - [Vault identity concepts](/vault/docs/concepts/identity)
+ - [Vault Identity secrets engine](/vault/docs/secrets/identity)
+ - [Identity: Entities and groups tutorial](/vault/tutorials/auth-methods/identity)
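+
+For example, a sketch of linking a userpass login to an existing entity (the
+entity name, canonical ID, and mount accessor are placeholders):
+
+```shell-session
+$ vault write identity/entity name="alice"
+$ vault write identity/entity-alias name="alice" \
+    canonical_id=ENTITY_ID \
+    mount_accessor=auth_userpass_1234
+```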
+
+**Anti-pattern issue:**
+
+When you do not use identity entities, each login through an auth method that is not linked to the user's entity counts as a separate client.
+
+## Increase IOPS
+
+IOPS (input/output operations per second) measures performance for Vault cluster members. Vault is bound by the IO limits of the storage backend rather than the compute requirements.
+
+**Recommended pattern:**
+
+Use the HashiCorp reference guidelines for Vault servers' hardware sizing and network considerations.
+
+- [Vault with Integrated storage reference architecture](/vault/tutorials/day-one-raft/raft-reference-architecture#system-requirements)
+- [Performance tuning](/vault/tutorials/operations/performance-tuning)
+- [Transform secrets engine](/vault/docs/concepts/transform)
+
+
+
+Depending on the client count, the Transform (Enterprise) and Transit secret engines can be resource-intensive.
+
+
+
+**Anti-pattern issue:**
+
+Limited IOPS can significantly degrade Vault’s performance.
+
+## Enable disaster recovery
+
+HashiCorp Vault's highly available (HA) [Integrated storage (Raft)](/vault/docs/concepts/integrated-storage)
+backend provides intra-cluster data replication across cluster members. Integrated Storage provides Vault with
+horizontal scalability and failure tolerance, but it does not provide backup for the entire cluster. Not utilizing
+disaster recovery for your production environment will negatively impact your organization's Recovery Point
+Objective (RPO) and Recovery Time Objective (RTO).
+
+**Recommended pattern:**
+
+For cluster-wide issues (for example, loss of network connectivity), Vault Enterprise Disaster Recovery (DR) replication
+provides a warm standby cluster containing all primary cluster data. The DR cluster does not service reads
+or writes but you can promote it to replace the primary cluster when needed.
+
+- [Disaster recovery replication setup](/vault/tutorials/day-one-raft/disaster-recovery)
+- [Disaster recovery (DR) replication](/vault/docs/enterprise/replication#disaster-recovery-dr-replication)
+- [DR replication API documentation](/vault/api-docs/system/replication/replication-dr)
+
+We also recommend periodically creating data snapshots to protect against data corruption.
+
+- [Vault data backup standard procedure](/vault/tutorials/standard-procedures/sop-backup)
+- [Automated integrated storage snapshots](/vault/docs/enterprise/automated-integrated-storage-snapshots)
+- [/sys/storage/raft/snapshot-auto](/vault/api-docs/system/storage/raftautosnapshots)
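+
+For example, a sketch of saving a manual snapshot of an integrated storage
+cluster with the CLI (the output file name is a placeholder):
+
+```shell-session
+$ vault operator raft snapshot save backup.snap
+```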
+
+**Anti-pattern issue:**
+
+If you do not enable disaster recovery and a catastrophic failure occurs, you risk longer downtime and the costs associated with not serving Vault clients in your environment.
+
+## Test disaster recovery
+
+Your disaster recovery (DR) solution is a key part of your overall disaster recovery plan.
+
+Designing and configuring your Vault disaster recovery solution is only the first step. You also need to validate the DR solution, as not doing so can negatively impact your organization's Recovery Point Objective (RPO) and Recovery Time Objective (RTO).
+
+**Recommended pattern:**
+
+Vault's Disaster Recovery (DR) replication mode provides a warm standby for
+failover if the primary cluster experiences catastrophic failure. You should
+periodically test the disaster recovery replication cluster by completing the
+failover and failback procedure.
+
+- [Vault disaster recovery replication failover and failback tutorial](/vault/tutorials/enterprise/disaster-recovery-replication-failover)
+- [Vault Enterprise replication](/vault/docs/enterprise/replication)
+- [Monitoring Vault replication](/vault/tutorials/monitoring/monitor-replication)
+
+You should establish standard operating procedures for restoring a Vault cluster from a snapshot. Restoring from a snapshot addresses data corruption or sabotage, which disaster recovery replication might be unable to protect against.
+
+- [Standard procedure for restoring a Vault cluster](/vault/tutorials/standard-procedures/sop-restore)
+
+**Anti-pattern issue:**
+
+If you don't test your disaster recovery solution, your key stakeholders will not feel confident they can effectively perform the disaster recovery plan. Testing the DR solution also helps your team to remove uncertainty around recovering the system during an outage.
+
+## Improve upgrade cadence
+
+While it might be easy to upgrade Vault whenever you have capacity, not having a frequent upgrade cadence can impact your Vault performance and security.
+
+**Recommended pattern:**
+
+We recommend upgrading to the latest version of Vault. Subscribing to releases in [Vault's GitHub repository](https://github.com/hashicorp/vault) and to notifications from [HashiCorp Vault discuss](https://discuss.hashicorp.com/c/release-notifications/57) will inform you when we release a new Vault version.
+
+- [Vault upgrade guides](/vault/docs/upgrading)
+- [Vault feature deprecation notice and plans](/vault/docs/deprecation)
+
+**Anti-pattern issue:**
+
+When you do not keep a regular upgrade cadence, your Vault environment could be missing key features or improvements.
+
+- Missing patches for bugs or vulnerabilities as documented in the [CHANGELOG](https://github.com/hashicorp/vault/blob/main/CHANGELOG.md).
+- New features to improve workflow.
+- You must use version-specific documentation rather than the latest documentation.
+- Some educational resources require a specific minimum Vault version.
+- Updates may require a stepped approach that uses an intermediate version before installing the latest binary.
+
+## Test before upgrades
+
+We recommend testing Vault in a sandbox environment before deploying to production.
+
+Although it might be faster to upgrade immediately in production, testing will help identify any compatibility issues.
+
+Be aware of the [CHANGELOG](https://github.com/hashicorp/vault/blob/main/CHANGELOG.md) and account for any new features, improvements, known issues, and bug fixes in your testing.
+
+**Recommended pattern:**
+
+Test new Vault versions in sandbox environments before upgrading in production and follow our upgrading documentation.
+
+We recommend adding a testing phase to your standard upgrade procedure.
+
+- [Vault upgrade standard procedure](/vault/tutorials/standard-procedures/sop-upgrade)
+- [Upgrading Vault](/vault/docs/upgrading)
+
+**Anti-pattern issue:**
+
+Without adequate testing before upgrading in production, you risk compatibility and performance issues.
+
+
+
+This could lead to downtime or degradation in your production Vault environment.
+
+
+
+## Rotate audit device logs
+
+Audit devices in Vault maintain a detailed log of every client request and server response.
+
+If you allow audit device logs to grow perpetually without rotation, you may face a blocked audit device when the filesystem storage becomes exhausted.
+
+**Recommended pattern:**
+
+Inspect and rotate audit logs periodically.
+
+- [Blocked audit devices tutorial](/vault/tutorials/monitoring/blocked-audit-devices)
+- [blocked audit devices](/vault/docs/audit#blocked-audit-devices)
+
+**Anti-pattern issue:**
+
+Vault stops responding to requests when it cannot write to at least one enabled audit device.
+
+An audit device can exhaust local storage if you do not maintain and rotate its log over time.
+
+## Monitor metrics
+
+Relying solely on Vault operational logs and data in the Vault UI gives you only a partial picture of the cluster's performance.
+
+
+**Recommended pattern:**
+
+Continuous monitoring will allow organizations to detect minor problems and promptly resolve them.
+Migrating from reactive to proactive monitoring will help to prevent system failures. Vault has multiple outputs
+that help monitor the cluster's activity: audit logs, operational logs, and telemetry data. This data can work
+with a SIEM (security information and event management) tool for aggregation, inspection, and alerting capabilities.
+
+- [Telemetry](/vault/docs/internals/telemetry#secrets-engines-metric)
+- [Telemetry metrics reference](/vault/tutorials/monitoring/telemetry-metrics-reference)
+
+Adding a monitoring solution:
+- [Audit device logs and incident response with elasticsearch](/vault/tutorials/monitoring/audit-elastic-incident-response)
+- [Monitor telemetry & audit device log data](/vault/tutorials/monitoring/monitor-telemetry-audit-splunk)
+- [Monitor telemetry with Prometheus & Grafana](/vault/tutorials/monitoring/monitor-telemetry-grafana-prometheus)
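+
+A minimal sketch of a `telemetry` stanza that exposes Prometheus-format
+metrics (the retention value is a placeholder):
+
+```hcl
+telemetry {
+  prometheus_retention_time = "30s"
+  disable_hostname          = true
+}
+```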
+
+
+
+
+ Vault logs to standard output and standard error by default, which the systemd journal captures automatically. You can also instruct Vault to redirect operational log writes to a file.
+
+
+
+**Anti-pattern issue:**
+
+Having partial insight into cluster activity can leave the business in a reactive state.
+
+## Establish usage baseline
+
+A baseline provides insight into current utilization and thresholds. Telemetry metrics are valuable, especially when monitored over time. You can use telemetry metrics to gather a baseline of cluster activity, while alerts inform you of abnormal activity.
+
+**Recommended pattern:**
+
+Telemetry information can also be streamed directly from Vault to a range of metrics aggregation solutions and
+saved for aggregation and inspection.
+
+- [Vault usage metrics](/vault/tutorials/monitoring/usage-metrics)
+- [Diagnose server issues](/vault/tutorials/monitoring/diagnose-startup-issues)
+
+**Anti-pattern issue:**
+
+This issue closely relates to the recommended pattern for [monitor metrics](#monitor-metrics). Telemetry data is only held in memory for a short period.
+
+## Minimize root token use
+
+Initializing a Vault server emits an initial root token that gives root-level access across all Vault features.
+
+**Recommended pattern:**
+
+We recommend that you revoke the root token after initializing Vault within your environment. If users require elevated access, create access control list policies that grant proper capabilities on the necessary paths in Vault. If your operations require the root token, keep it for the shortest possible time before revoking it.
+
+- [Generate root tokens tutorial](/vault/tutorials/operations/generate-root)
+- [Root tokens](/vault/docs/concepts/tokens#root-tokens)
+- [Vault policies](/vault/docs/concepts/policies)
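+
+For example, a sketch of revoking the initial root token once setup is
+complete (the token value is a placeholder):
+
+```shell-session
+$ vault token revoke hvs.XXXXXXXXXXXX
+```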
+
+**Anti-pattern issue:**
+
+A root token can perform all actions within Vault and never expire. Unrestricted access can give users higher privileges than necessary to all Vault operations and paths. Sharing and providing access to root tokens poses a security risk.
+
+## Rekey when necessary
+
+Vault distributes unseal keys to stakeholders. A quorum of key shares is needed to unseal Vault based on your initialization settings.
+
+**Recommended pattern:**
+
+Vault supports rekeying, and you should establish a workflow for rekeying when necessary.
+
+- [Rekeying & rotating Vault](/vault/tutorials/operations/rekeying-and-rotating)
+- [Operator rekey](/vault/docs/commands/operator/rekey)
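+
+For example, a sketch of starting a rekey operation that issues five new key
+shares with a threshold of three:
+
+```shell-session
+$ vault operator rekey -init -key-shares=5 -key-threshold=3
+```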
+
+**Anti-pattern issue:**
+
+If several stakeholders leave the organization, you risk not having the required key shares to meet the unseal quorum, which could result in the loss of the ability to unseal Vault.
diff --git a/website/content/docs/interoperability-matrix.mdx b/website/content/docs/interoperability-matrix.mdx
index 1c5029b31d8a..aa921fa22a4c 100644
--- a/website/content/docs/interoperability-matrix.mdx
+++ b/website/content/docs/interoperability-matrix.mdx
@@ -59,6 +59,7 @@ Vault Secrets Engine Key: EKM Provider = Vault EK
| Cloudian | HyperStore 7.5.1 | KMIP | 1.12 | N/A |
| Cockroach Labs | Cockroach Cloud DB | KMSE | 1.10 | N/A |
| Cockroach Labs | Cockroach DB | Transit | 1.10 | Yes |
+| Cohesity | Cohesity DataPlatform | KMIP | 1.13.2 | N/A |
| Commvault Systems | CommVault | KMIP | 1.9 | N/A |
| Cribl | Cribl Stream | KV | 1.8 | Yes |
| DataStax | DataStax Enterprise | KMIP | 1.11 | Yes |
@@ -84,7 +85,11 @@ Vault Secrets Engine Key: EKM Provider = Vault EK
| Oracle | Oracle 19c | PKCS#11 | 1.11 | N/A |
| Percona | Server 8.0 | KMIP | 1.9 | N/A |
| Percona | XtraBackup 8.0 | KMIP | 1.9 | N/A |
+| Rubrik | CDM 9.1 (Edge) | KMIP | 1.16.2 | N/A |
+| Scality | Scality RING | KMIP | 1.12 | N/A |
| Snowflake | Snowflake | KMSE | 1.6 | N/A |
+| Veeam | Kasten K10 | Transit | 1.9 | N/A |
+| Veritas | NetBackup | KMIP | 1.13.9 | N/A |
| VMware | vSphere 7.0, 8.0 | KMIP | 1.2 | N/A |
| VMware | vSan 7.0, 8.0 | KMIP | 1.2 | N/A |
| Yugabyte | Yugabyte Platform | Transit | 1.9 | No |
diff --git a/website/content/docs/platform/aws/lambda-extension.mdx b/website/content/docs/platform/aws/lambda-extension.mdx
index 3702c96aaa5e..d9fe60028b88 100644
--- a/website/content/docs/platform/aws/lambda-extension.mdx
+++ b/website/content/docs/platform/aws/lambda-extension.mdx
@@ -273,6 +273,13 @@ synchronously refresh its own token before proxying requests if the token is
expired (including a grace window), and it will attempt to renew its token if the
token is nearly expired but renewable.
+
+
+ The Vault Lambda extension does not currently work with
+ [AWS SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html).
+
+
+
## Performance impact
AWS Lambda pricing is based on [number of invocations, time of execution and memory
diff --git a/website/content/docs/platform/k8s/vso/api-reference.mdx b/website/content/docs/platform/k8s/vso/api-reference.mdx
index 57c1f1c18feb..cb2ccf98eb10 100644
--- a/website/content/docs/platform/k8s/vso/api-reference.mdx
+++ b/website/content/docs/platform/k8s/vso/api-reference.mdx
@@ -7,7 +7,7 @@ description: >-
# API Reference
@@ -212,7 +212,7 @@ with a timestamp value of when the trigger was executed.
E.g. vso.secrets.hashicorp.com/restartedAt: "2023-03-23T13:39:31Z"
-Supported resources: Deployment, DaemonSet, StatefulSet
+Supported resources: Deployment, DaemonSet, StatefulSet, argo.Rollout
@@ -224,8 +224,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `kind` _string_ |  |  | Enum: [Deployment DaemonSet StatefulSet] |
-| `name` _string_ | | | |
+| `kind` _string_ | Kind of the resource |  | Enum: [Deployment DaemonSet StatefulSet argo.Rollout] |
+| `name` _string_ | Name of the resource | | |
#### SecretTransformation
diff --git a/website/content/docs/platform/k8s/vso/helm.mdx b/website/content/docs/platform/k8s/vso/helm.mdx
index 05d0e219d082..ac29f51ca49b 100644
--- a/website/content/docs/platform/k8s/vso/helm.mdx
+++ b/website/content/docs/platform/k8s/vso/helm.mdx
@@ -11,7 +11,7 @@ The chart is customizable using
[Helm configuration values](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing).
+<!-- Generated from the vault-secrets-operator repo's values.yaml: file commit=bf1746f1937f25b4cb28f15d4b818303f3a78dd9 -->
## Top-Level Stanzas
@@ -34,6 +34,16 @@ Use these links to navigate to a particular top-level stanza.
- `replicas` ((#v-controller-replicas)) (`integer: 1`) - Set the number of replicas for the operator.
+ - `strategy` ((#v-controller-strategy)) (`object: ""`) - Configure update strategy for multi-replica deployments.
+   Kubernetes supports the types Recreate and RollingUpdate.
+ ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+ Example:
+ strategy: {}
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 0
+ type: RollingUpdate
+
- `hostAliases` ((#v-controller-hostaliases)) (`array