diff --git a/.forbiddenapis/banned-signatures-common.txt b/.forbiddenapis/banned-signatures-common.txt
index 23203f1247cfc..8e773e90bd467 100644
--- a/.forbiddenapis/banned-signatures-common.txt
+++ b/.forbiddenapis/banned-signatures-common.txt
@@ -6,3 +6,13 @@ org.jboss.jandex.AnnotationValue#toString()
@defaultMessage Replace this by using InputStream.transferTo(OutputStream)
org.apache.commons.io.IOUtils#copy(java.io.InputStream,java.io.OutputStream)
org.apache.commons.compress.utils.IOUtils#copy(java.io.InputStream,java.io.OutputStream)
+
+@defaultMessage We should avoid using WildFly Common APIs
+org.wildfly.common.Assert
+org.wildfly.common.net.**
+org.wildfly.common.os.**
+
+@defaultMessage Use JUnit asserts
+io.smallrye.common.constraint.Assert#assertTrue(**)
+io.smallrye.common.constraint.Assert#assertFalse(**)
+io.smallrye.common.constraint.Assert#assertNotNull(**)
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index c066b9b432ddf..a356ecdfc9426 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -36,6 +36,7 @@ updates:
- dependency-name: com.google.cloud.tools:jib-core
- dependency-name: org.jboss.threads:jboss-threads
- dependency-name: org.jboss.marshalling:*
+ - dependency-name: org.wildfly.common:*
# Quarkus
- dependency-name: io.quarkus.*:*
- dependency-name: io.quarkus:*
diff --git a/.github/matrix-jvm-tests.json b/.github/matrix-jvm-tests.json
index 4f2654e282c37..cdbb871a3b757 100644
--- a/.github/matrix-jvm-tests.json
+++ b/.github/matrix-jvm-tests.json
@@ -1,20 +1,46 @@
-[ {
- "name": "17",
+[
+ {
+ "name": "JVM Tests - JDK 17",
+ "java-version": 17,
+ "maven_args": "$JVM_TEST_MAVEN_ARGS",
+ "maven_opts": "-Xmx2g -XX:MaxMetaspaceSize=1g",
+ "os-name": "ubuntu-22.04"
+ },
+ {
+ "name": "JVM Tests - JDK 21",
+ "java-version": 21,
+ "java-version-gradle": 20,
+ "maven_args": "$JVM_TEST_MAVEN_ARGS",
+ "maven_opts": "-Xmx3g -XX:MaxMetaspaceSize=1g",
+ "os-name": "ubuntu-latest"
+ },
+ {
+ "name": "JVM Tests - JDK 17 Windows",
+ "java-version": 17,
+ "maven_args": "-DskipDocs -Dformat.skip",
+ "maven_opts": "-Xmx2g -XX:MaxMetaspaceSize=1g",
+ "os-name": "windows-latest"
+ },
+ {
+ "name": "JVM Integration Tests - JDK 17",
+ "category": "Integration",
"java-version": 17,
"maven_args": "$JVM_TEST_MAVEN_ARGS",
"maven_opts": "-Xmx2g -XX:MaxMetaspaceSize=1g",
"os-name": "ubuntu-latest"
-}
-, {
- "name": "21",
+ },
+ {
+ "name": "JVM Integration Tests - JDK 21",
+ "category": "Integration",
"java-version": 21,
"java-version-gradle": 20,
"maven_args": "$JVM_TEST_MAVEN_ARGS",
"maven_opts": "-Xmx3g -XX:MaxMetaspaceSize=1g",
"os-name": "ubuntu-latest"
-}
-, {
- "name": "17 Windows",
+ },
+ {
+ "name": "JVM Integration Tests - JDK 17 Windows",
+ "category": "Integration",
"java-version": 17,
"maven_args": "-DskipDocs -Dformat.skip",
"maven_opts": "-Xmx2g -XX:MaxMetaspaceSize=1g",
diff --git a/.github/native-tests.json b/.github/native-tests.json
index ee6584b2b45f4..3e8c8cf1b02b5 100644
--- a/.github/native-tests.json
+++ b/.github/native-tests.json
@@ -22,13 +22,13 @@
"category": "Data3",
"timeout": 75,
"test-modules": "flyway, hibernate-orm-panache, hibernate-orm-panache-kotlin, hibernate-orm-envers, liquibase, liquibase-mongodb",
- "os-name": "ubuntu-latest"
+ "os-name": "ubuntu-latest"
},
{
"category": "Data4",
"timeout": 60,
"test-modules": "mongodb-client, mongodb-devservices, mongodb-panache, mongodb-rest-data-panache, mongodb-panache-kotlin, redis-client, hibernate-orm-rest-data-panache",
- "os-name": "ubuntu-latest"
+ "os-name": "ubuntu-latest"
},
{
"category": "Data5",
@@ -44,8 +44,14 @@
},
{
"category": "Data7",
- "timeout": 85,
- "test-modules": "reactive-oracle-client, reactive-mysql-client, reactive-db2-client, hibernate-reactive-db2, hibernate-reactive-mariadb, hibernate-reactive-mysql, hibernate-reactive-mysql-agroal-flyway, hibernate-reactive-panache, hibernate-reactive-panache-kotlin",
+ "timeout": 90,
+ "test-modules": "reactive-oracle-client, reactive-mysql-client, reactive-db2-client, hibernate-reactive-db2, hibernate-reactive-mariadb, hibernate-reactive-mssql, hibernate-reactive-mysql, hibernate-reactive-mysql-agroal-flyway, hibernate-reactive-panache, hibernate-reactive-panache-kotlin",
+ "os-name": "ubuntu-latest"
+ },
+ {
+ "category": "Build tools and DevTools",
+ "timeout": 75,
+ "test-modules": "maven, gradle, devtools-registry-client",
"os-name": "ubuntu-latest"
},
{
@@ -76,48 +82,48 @@
"category": "Security2",
"timeout": 75,
"test-modules": "oidc, oidc-code-flow, oidc-tenancy, oidc-client, oidc-client-reactive, oidc-token-propagation, oidc-wiremock, oidc-client-wiremock, oidc-wiremock-providers, oidc-dev-services",
- "os-name": "ubuntu-latest"
- },
- {
- "category": "Security3",
- "timeout": 55,
- "test-modules": "keycloak-authorization, smallrye-jwt-token-propagation, security-webauthn",
- "os-name": "ubuntu-latest"
- },
- {
- "category": "Cache",
- "timeout": 75,
- "test-modules": "infinispan-cache-jpa, infinispan-client, cache, redis-cache, infinispan-cache",
- "os-name": "ubuntu-latest"
- },
- {
- "category": "HTTP",
- "timeout": 130,
- "test-modules": "elytron-resteasy, resteasy-jackson, elytron-resteasy-reactive, resteasy-mutiny, resteasy-reactive-kotlin/standard, vertx, vertx-http, vertx-web, vertx-http-compressors/all, vertx-http-compressors/some, vertx-web-jackson, vertx-graphql, virtual-http, rest-client, rest-client-reactive, rest-client-reactive-stork, rest-client-reactive-multipart, websockets, websockets-next, management-interface, management-interface-auth, mutiny-native-jctools",
- "os-name": "ubuntu-latest"
- },
- {
- "category": "Misc1",
- "timeout": 70,
- "test-modules": "maven, jackson, jsonb, kotlin, rest-client-reactive-kotlin-serialization, quartz, qute, logging-min-level-unset, logging-min-level-set, simple with space",
+ "os-name": "ubuntu-latest"
+ },
+ {
+ "category": "Security3",
+ "timeout": 55,
+ "test-modules": "keycloak-authorization, smallrye-jwt-token-propagation, security-webauthn",
+ "os-name": "ubuntu-latest"
+ },
+ {
+ "category": "Cache",
+ "timeout": 75,
+ "test-modules": "infinispan-cache-jpa, infinispan-client, cache, redis-cache, infinispan-cache",
+ "os-name": "ubuntu-latest"
+ },
+ {
+ "category": "HTTP",
+ "timeout": 130,
+ "test-modules": "elytron-resteasy, resteasy-jackson, elytron-resteasy-reactive, resteasy-mutiny, resteasy-reactive-kotlin/standard, vertx, vertx-http, vertx-web, vertx-http-compressors/all, vertx-http-compressors/some, vertx-web-jackson, vertx-graphql, virtual-http, rest-client, rest-client-reactive, rest-client-reactive-stork, rest-client-reactive-multipart, websockets, websockets-next, management-interface, management-interface-auth, mutiny-native-jctools",
+ "os-name": "ubuntu-latest"
+ },
+ {
+ "category": "Misc1",
+ "timeout": 70,
+ "test-modules": "jackson, jsonb, kotlin, rest-client-reactive-kotlin-serialization, quartz, qute, logging-min-level-unset, logging-min-level-set, simple with space, web-dependency-locator",
"os-name": "ubuntu-latest"
},
{
"category": "Misc2",
"timeout": 75,
- "test-modules": "hibernate-validator, test-extension/tests, logging-gelf, mailer, native-config-profile, locales/all, locales/some, locales/default",
+ "test-modules": "hibernate-validator, test-extension/tests, logging-gelf, mailer, native-config-profile, locales/all, locales/some, locales/default, jaxp, jaxb",
"os-name": "ubuntu-latest"
},
{
"category": "Misc3",
"timeout": 80,
- "test-modules": "kubernetes-client, openshift-client, kubernetes-service-binding-jdbc, smallrye-config, smallrye-graphql, smallrye-graphql-client, smallrye-graphql-client-keycloak, smallrye-metrics",
+ "test-modules": "kubernetes-client, openshift-client, kubernetes-service-binding-jdbc, smallrye-config, smallrye-graphql, smallrye-graphql-client, smallrye-graphql-client-keycloak, picocli-native",
"os-name": "ubuntu-latest"
},
{
- "category": "Misc4",
+ "category": "Observability",
"timeout": 130,
- "test-modules": "picocli-native, gradle, micrometer-mp-metrics, micrometer-prometheus, logging-json, jaxp, jaxb, observability-lgtm, opentelemetry, opentelemetry-jdbc-instrumentation, opentelemetry-mongodb-client-instrumentation, opentelemetry-redis-instrumentation, web-dependency-locator",
+ "test-modules": "smallrye-metrics, micrometer-mp-metrics, micrometer-prometheus, logging-json, observability-lgtm, opentelemetry, opentelemetry-jdbc-instrumentation, opentelemetry-mongodb-client-instrumentation, opentelemetry-redis-instrumentation, micrometer-opentelemetry",
"os-name": "ubuntu-latest"
},
{
@@ -138,12 +144,6 @@
"test-modules": "resteasy-jackson, qute, liquibase",
"os-name": "windows-latest"
},
- {
- "category": "DevTools Integration Tests",
- "timeout": 75,
- "test-modules": "devtools-registry-client",
- "os-name": "ubuntu-latest"
- },
{
"category": "AWT, ImageIO and Java2D, Packaging .so files",
"timeout": 40,
diff --git a/.github/quarkus-github-bot.yml b/.github/quarkus-github-bot.yml
index baee30aab0814..161c666ea22b8 100644
--- a/.github/quarkus-github-bot.yml
+++ b/.github/quarkus-github-bot.yml
@@ -37,6 +37,11 @@ projects:
triage:
discussions:
monitoredCategories: [33575230]
+ guardedBranches:
+ - ref: 3.15
+ notify: [jmartisk, gsmet, gastaldi, rsvoboda, aloubyansky]
+ - ref: 3.8
+ notify: [jmartisk, gsmet, gastaldi, rsvoboda, aloubyansky]
rules:
- id: amazon-lambda
labels: [area/amazon-lambda]
@@ -470,7 +475,7 @@ triage:
notify: [radcortez]
- id: core
labels: [area/core]
- notify: [aloubyansky, gsmet, geoand, radcortez, Sanne, stuartwdouglas]
+ notify: [aloubyansky, gsmet, geoand, radcortez, Sanne]
directories:
- core/
- id: dependencies
@@ -624,7 +629,7 @@ triage:
- id: rest
labels: [area/rest]
title: resteasy.reactive
- notify: [geoand, FroMage, stuartwdouglas]
+ notify: [geoand, FroMage]
directories:
- extensions/resteasy-reactive/
- id: scala
@@ -719,11 +724,11 @@ triage:
- id: continuous-testing
labels: [area/continuous-testing]
title: "continuous.testing"
- notify: [stuartwdouglas]
+ notify: [holly-cummins, geoand]
- id: devservices
labels: [area/devservices]
title: "dev.?services?"
- notify: [stuartwdouglas, geoand]
+ notify: [geoand]
- id: jdbc
labels: [area/jdbc]
title: "jdbc"
diff --git a/.github/quarkus-github-lottery.yml b/.github/quarkus-github-lottery.yml
index 656c638938e6e..99bb505e8ae7e 100644
--- a/.github/quarkus-github-lottery.yml
+++ b/.github/quarkus-github-lottery.yml
@@ -29,6 +29,23 @@ buckets:
timeout: P14D
ignoreLabels: ["triage/on-ice"]
participants:
+ - username: "DavideD"
+ timezone: "Europe/Vienna"
+ triage:
+ days: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY"]
+ maxIssues: 3
+ maintenance:
+ labels: ["area/hibernate-orm", "area/hibernate-reactive", "area/hibernate-validator", "area/hibernate-search", "area/jdbc"]
+ days: ["WEDNESDAY", "THURSDAY", "FRIDAY"] # Count me out on Monday and Tuesday.
+ created:
+ maxIssues: 3
+ feedback:
+ needed:
+ maxIssues: 3
+ provided:
+ maxIssues: 3
+ stale:
+ maxIssues: 1
- username: "yrodiere"
timezone: "Europe/Paris"
triage:
diff --git a/.github/workflows/ci-actions-incremental.yml b/.github/workflows/ci-actions-incremental.yml
index b1198cd0cd23a..1d787e1cea6d5 100644
--- a/.github/workflows/ci-actions-incremental.yml
+++ b/.github/workflows/ci-actions-incremental.yml
@@ -53,7 +53,11 @@ env:
COMMON_MAVEN_ARGS: "-e -B --settings .github/mvn-settings.xml --fail-at-end"
COMMON_TEST_MAVEN_ARGS: "-Dformat.skip -Denforcer.skip -DskipDocs -Dforbiddenapis.skip -DskipExtensionValidation -DskipCodestartValidation"
NATIVE_TEST_MAVEN_ARGS: "-Dtest-containers -Dstart-containers -Dquarkus.native.native-image-xmx=6g -Dnative -Dnative.surefire.skip -Dno-descriptor-tests clean install -DskipDocs"
- JVM_TEST_MAVEN_ARGS: "-Dtest-containers -Dstart-containers -Dquarkus.test.hang-detection-timeout=60"
+ JVM_TEST_MAVEN_ARGS: "-Dtest-containers -Dstart-containers -Dquarkus.test.hang-detection-timeout=300"
+ # Important: keep these selectors in sync with the grep commands in the calc_run_flags job!
+ # This may be a lot better with maven 4, but with maven 3, excluding a project does not exclude its children, and it's not possible to include a project and explicitly exclude some children; compensate by doing excludes the low-tech way, at the shell level
+ JVM_TEST_INTEGRATION_TESTS_SELECTOR: "-f integration-tests -pl !gradle -pl !maven -pl !devmode -pl !devtools"
+ JVM_TEST_NORMAL_TESTS_SELECTOR: "-pl !docs -Dno-test-modules"
PTS_MAVEN_ARGS: "-Ddevelocity.pts.enabled=${{ github.event_name == 'pull_request' && github.base_ref == 'main' && 'true' || 'false' }}"
DB_USER: hibernate_orm_test
DB_PASSWORD: hibernate_orm_test
@@ -336,7 +340,7 @@ jobs:
echo "run_tcks=${run_tcks}" >> $GITHUB_OUTPUT
jvm-tests:
- name: JVM Tests - JDK ${{matrix.java.name}}
+ name: ${{ matrix.java.name }}
runs-on: ${{ matrix.java.os-name }}
needs: [build-jdk17, calculate-test-jobs]
# Skip main in forks
@@ -352,8 +356,8 @@ jobs:
steps:
- name: Gradle Enterprise environment
run: |
- echo "GE_TAGS=jdk-${{matrix.java.name}}" >> "$GITHUB_ENV"
- echo "GE_CUSTOM_VALUES=gh-job-name=JVM Tests - JDK ${{matrix.java.name}}" >> "$GITHUB_ENV"
+ echo "GE_TAGS=jdk-${{matrix.java.java-version}}" >> "$GITHUB_ENV"
+ echo "GE_CUSTOM_VALUES=gh-job-name=${{ matrix.java.name }}" >> "$GITHUB_ENV"
- name: Stop mysql
if: "!startsWith(matrix.java.os-name, 'windows') && !startsWith(matrix.java.os-name, 'macos')"
run: |
@@ -419,14 +423,14 @@ jobs:
if: github.event_name == 'pull_request'
with:
path: ~/.m2/.develocity/build-cache
- key: develocity-cache-JVM Tests - JDK ${{matrix.java.name}}-${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}
+ key: develocity-cache-${{matrix.java.name}}-${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}
restore-keys: |
- develocity-cache-JVM Tests - JDK ${{matrix.java.name}}-${{ github.event.pull_request.number }}-
+ develocity-cache-${{matrix.java.name}}-${{ github.event.pull_request.number }}-
- name: Setup Develocity Build Scan capture
uses: gradle/develocity-actions/setup-maven@v1.3
with:
capture-strategy: ON_DEMAND
- job-name: "JVM Tests - JDK ${{matrix.java.name}}"
+ job-name: "${{ matrix.java.name }}"
add-pr-comment: false
add-job-summary: false
develocity-access-key: ${{ secrets.GRADLE_ENTERPRISE_ACCESS_KEY }}
@@ -434,8 +438,14 @@ jobs:
- name: Build
env:
CAPTURE_BUILD_SCAN: true
- # Despite the pre-calculated run_jvm flag, GIB has to be re-run here to figure out the exact submodules to build.
- run: ./mvnw $COMMON_MAVEN_ARGS $COMMON_TEST_MAVEN_ARGS $PTS_MAVEN_ARGS clean install -Dsurefire.timeout=1200 -pl !integration-tests/gradle -pl !integration-tests/maven -pl !integration-tests/devmode -pl !integration-tests/devtools -Dno-test-kubernetes -pl !docs ${{ matrix.java.maven_args }} ${{ needs.build-jdk17.outputs.gib_args }}
+ run: |
+ if [[ "${{ matrix.java.category }}" == *"Integration"* ]]; then
+ PL=$JVM_TEST_INTEGRATION_TESTS_SELECTOR
+ else
+ PL=$JVM_TEST_NORMAL_TESTS_SELECTOR
+ fi
+ # Despite the pre-calculated run_jvm flag, GIB has to be re-run here to figure out the exact submodules to build.
+ ./mvnw $COMMON_MAVEN_ARGS $COMMON_TEST_MAVEN_ARGS $PTS_MAVEN_ARGS $PL clean install -Dsurefire.timeout=1200 -Dno-test-kubernetes ${{ matrix.java.maven_args }} ${{ needs.build-jdk17.outputs.gib_args }}
- name: Clean Gradle temp directory
if: always()
run: devtools/gradle/gradlew --stop && rm -rf devtools/gradle/gradle-extension-plugin/build/tmp
@@ -449,7 +459,7 @@ jobs:
uses: actions/upload-artifact@v4
if: failure()
with:
- name: test-reports-jvm${{matrix.java.name}}
+ name: test-reports-${{matrix.java.name}}
path: 'test-reports.tgz'
retention-days: 7
- name: Prepare build reports archive
@@ -464,7 +474,7 @@ jobs:
uses: actions/upload-artifact@v4
if: always()
with:
- name: "build-reports-${{ github.run_attempt }}-JVM Tests - JDK ${{matrix.java.name}}"
+ name: "build-reports-${{ github.run_attempt }}-${{ matrix.java.name }}"
path: |
build-reports.zip
retention-days: 7
@@ -474,7 +484,7 @@ jobs:
# -- even in case of success, as some flaky tests won't fail the build
if: always()
with:
- name: "debug-${{ github.run_attempt }}-JVM Tests - JDK ${{matrix.java.name}}"
+ name: "debug-${{ github.run_attempt }}-${{ matrix.java.name }}"
path: "**/target/debug/**"
if-no-files-found: ignore # If we're not currently debugging any test, it's fine.
retention-days: 28 # We don't get notified for flaky tests, so let's give maintainers time to get back to it
@@ -482,7 +492,7 @@ jobs:
uses: actions/upload-artifact@v4
if: ${{ failure() || cancelled() }}
with:
- name: "build-logs-JVM Tests - JDK ${{matrix.java.name}}"
+ name: "build-logs-${{ matrix.java.name }}"
path: |
**/build.log
retention-days: 7
diff --git a/.github/workflows/doc-build.yml b/.github/workflows/doc-build.yml
index 137e9b99695e7..7e18386ef2c8e 100644
--- a/.github/workflows/doc-build.yml
+++ b/.github/workflows/doc-build.yml
@@ -50,7 +50,7 @@ jobs:
timeout-minutes: 60
runs-on: ubuntu-latest
# Skip main in forks
- if: "github.repository == 'quarkusio/quarkus' || !endsWith(github.ref, '/main')"
+ if: github.repository == 'quarkusio/quarkus' || !endsWith(github.ref, '/main')
steps:
- uses: actions/checkout@v4
- name: Set up JDK 17
@@ -104,3 +104,19 @@ jobs:
docs/
target/asciidoc/generated/config/
retention-days: 1
+
+ - name: Prepare build reports archive
+ if: always()
+ run: |
+ 7z a -tzip build-reports.zip -r \
+ '**/target/*-reports/TEST-*.xml' \
+ 'target/build-report.json' \
+ LICENSE
+ - name: Upload build reports
+ uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: "build-reports-${{ github.run_attempt }}-Documentation Build"
+ path: |
+ build-reports.zip
+ retention-days: 7
diff --git a/.github/workflows/native-cron-build.yml.disabled b/.github/workflows/native-cron-build.yml.disabled
index 0440990e0f744..84b3be4fee21d 100644
--- a/.github/workflows/native-cron-build.yml.disabled
+++ b/.github/workflows/native-cron-build.yml.disabled
@@ -20,7 +20,7 @@ jobs:
run: sudo systemctl stop mysql
- name: Pull docker image
- run: docker pull quay.io/quarkus/ubi-quarkus-graalvmce-builder-image:22.3-java${{ matrix.java }}
+ run: docker pull quay.io/quarkus/ubi9-quarkus-graalvmce-builder-image:22.3-java${{ matrix.java }}
- name: Set up JDK ${{ matrix.java }}
uses: actions/setup-java@v2
@@ -55,7 +55,7 @@ jobs:
run: ./mvnw -B install -DskipTests -DskipITs -Dformat.skip
- name: Run integration tests in native
- run: ./mvnw -B --settings .github/mvn-settings.xml verify -f integration-tests/pom.xml --fail-at-end -Dno-format -Dtest-containers -Dstart-containers -Dnative -Dquarkus.native.container-build=true -Dquarkus.native.builder-image=quay.io/quarkus/ubi-quarkus-graalvmce-builder-image:22.3-java${{ matrix.java }} -pl '!io.quarkus:quarkus-integration-test-google-cloud-functions-http,!io.quarkus:quarkus-integration-test-google-cloud-functions,!io.quarkus:quarkus-integration-test-funqy-google-cloud-functions'
+ run: ./mvnw -B --settings .github/mvn-settings.xml verify -f integration-tests/pom.xml --fail-at-end -Dno-format -Dtest-containers -Dstart-containers -Dnative -Dquarkus.native.container-build=true -Dquarkus.native.builder-image=quay.io/quarkus/ubi9-quarkus-graalvmce-builder-image:22.3-java${{ matrix.java }} -pl '!io.quarkus:quarkus-integration-test-google-cloud-functions-http,!io.quarkus:quarkus-integration-test-google-cloud-functions,!io.quarkus:quarkus-integration-test-funqy-google-cloud-functions'
- name: Report
if: always()
diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml
index b8529bff74821..79afa0aa0acb0 100644
--- a/.mvn/extensions.xml
+++ b/.mvn/extensions.xml
@@ -2,7 +2,7 @@
com.gradledevelocity-maven-extension
- 1.23
+ 1.23.1com.gradle
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3fdd79b643051..6da739594c516 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -139,7 +139,14 @@ You can check the last publication date here: [!NOTE]
+> It is recommended to build Quarkus with Java 17 as it is the minimum requirement for Quarkus.
+>
+> You can however build Quarkus with more recent JDKs (such as Java 21) but some Gradle-related modules need to be able to find a Java 17 toolchain so you will need to have Java 17 around.
+>
+> The easiest way to achieve that is to use [SDKMAN!](https://sdkman.io/) to install Java 17 alongside your preferred JDK: it will be automatically detected by Gradle when building the Gradle modules.
+
+You can build Quarkus using the following commands:
```sh
git clone git@github.com:quarkusio/quarkus.git
@@ -252,7 +259,6 @@ If you have not done so on this machine, you need to:
* Windows:
* enable longpaths: `git config --global core.longpaths true`
* avoid CRLF breaks: `git config --global core.autocrlf false`
- * enable symlinks: `git config --global core.symlinks true`
* Install Java SDK 17+ (OpenJDK recommended)
* Install [GraalVM](https://quarkus.io/guides/building-native-image)
* Install platform C developer tools:
@@ -421,7 +427,7 @@ productive. The following Maven tips can vastly speed up development when workin
[mvnd](https://github.com/apache/maven-mvnd) is a daemon for Maven providing faster builds.
It parallelizes your builds by default and makes sure the output is consistent even for a parallelized build.
-You can https://github.com/apache/maven-mvnd?tab=readme-ov-file#how-to-install-mvnd[install mvnd] with SDKMAN!, Homebrew...
+You can [install mvnd](https://github.com/apache/maven-mvnd?tab=readme-ov-file#how-to-install-mvnd) with SDKMAN!, Homebrew...
mvnd is a good companion for your Quarkus builds.
diff --git a/bom/application/pom.xml b/bom/application/pom.xml
index f4d18e095bd1a..db3e57c4ff8a2 100644
--- a/bom/application/pom.xml
+++ b/bom/application/pom.xml
@@ -22,7 +22,7 @@
9.0.55.0.03.0.2
- 3.2.3
+ 3.2.41.3.211.1.7
@@ -47,18 +47,18 @@
2.04.0.22.9.0
- 3.11.1
- 4.1.1
+ 3.11.2
+ 4.2.04.0.0
- 4.0.6
- 2.12.0
- 6.7.3
+ 4.0.8
+ 2.12.1
+ 6.8.04.6.12.2.01.0.133.0.1
- 3.17.1
- 4.26.0
+ 3.18.1
+ 4.27.02.7.02.1.33.0.0
@@ -82,7 +82,7 @@
4.0.59.7.12.18.0
- 16.0.0.Final
+ 16.1.0.Final3.0-alpha-22.1.0
@@ -91,7 +91,7 @@
2.18.21.0.0.Final3.17.0
- 1.17.2
+ 1.18.01.7.0
@@ -99,18 +99,18 @@
0.0.9.Final2.58.0.0.Final
- 8.17.0
+ 8.17.12.2.212.2.5.Final2.2.2.SP013.0.3.Final2.0.0.Final
- 1.7.0.Final
+ 2.0.11.0.1.Final2.6.0.Final2.2.2.Final3.8.0.Final
- 4.5.11
+ 4.5.124.5.144.4.164.1.5
@@ -123,22 +123,22 @@
8.3.012.8.1.jre111.6.7
- 23.5.0.24.07
+ 23.6.0.24.1010.16.1.112.1.0.01.2.62.25.10.5
- 15.0.11.Final
- 5.0.12.Final
- 3.1.8
- 4.1.115.Final
+ 15.0.13.Final
+ 5.0.13.Final
+ 3.2.0
+ 4.1.117.Final1.16.01.0.43.6.1.Final
- 2.7.0
+ 2.8.04.0.5
- 3.7.2
+ 3.9.01.8.01.1.10.50.109.1
@@ -158,14 +158,14 @@
3.2.04.2.23.1.1.Final
- 11.2.0
+ 11.3.13.0.44.29.14.29.12.36.0.0
- 5.3.0
+ 5.3.10.34.13.26.30.3.0
@@ -179,7 +179,7 @@
2.2.026.0.31.15.1
- 3.48.4
+ 3.49.02.36.00.27.21.45.3
@@ -188,12 +188,12 @@
1.1.41.27.11.13.0
- 2.11.0
+ 2.12.12.0.1.Final2.24.31.3.1.Final1.12.0
- 2.6.6.Final
+ 2.6.8.Final0.1.18.Final1.20.43.4.0
@@ -212,7 +212,7 @@
0.1.32.12.10.8.11
- 1.1.0
+ 1.1.13.3.02.12.4
@@ -248,7 +248,7 @@
import
-
+
io.quarkusquarkus-bom-dev-ui
@@ -502,7 +502,7 @@
org.jetbrainsannotations
- 26.0.1
+ 26.0.2
@@ -1004,6 +1004,16 @@
quarkus-oidc-client-graphql-deployment${project.version}
+
+ io.quarkus
+ quarkus-oidc-token-propagation-common-deployment
+ ${project.version}
+
+
+ io.quarkus
+ quarkus-oidc-token-propagation-common
+ ${project.version}
+ io.quarkusquarkus-resteasy-client-oidc-token-propagation
@@ -3199,6 +3209,16 @@
quarkus-micrometer${project.version}
+
+ io.quarkus
+ quarkus-micrometer-opentelemetry-deployment
+ ${project.version}
+
+
+ io.quarkus
+ quarkus-micrometer-opentelemetry
+ ${project.version}
+ io.quarkusquarkus-micrometer-registry-prometheus-deployment
diff --git a/bom/dev-ui/pom.xml b/bom/dev-ui/pom.xml
index 9cf2ab7296347..90bde85a3b092 100644
--- a/bom/dev-ui/pom.xml
+++ b/bom/dev-ui/pom.xml
@@ -13,7 +13,7 @@
Dependency management for dev-ui. Importable by third party extension developers.
- 24.6.1
+ 24.6.23.2.14.1.13.2.1
@@ -28,7 +28,7 @@
1.7.51.7.05.6.0
- 2.0.5
+ 2.0.92.4.01.0.171.0.1
diff --git a/build-parent/pom.xml b/build-parent/pom.xml
index 0470ec3298fed..1b31434ccfeda 100644
--- a/build-parent/pom.xml
+++ b/build-parent/pom.xml
@@ -29,11 +29,11 @@
1.6.Final
- 3.2.3
+ 3.2.41.0.02.5.13
- 4.8.0
+ 4.9.03.26.32.0.3.Final6.0.1
@@ -104,7 +104,7 @@
3.27.3
- 3.10.0
+ 3.11.07.3.0
@@ -597,7 +597,7 @@
org.apache.groovygroovy
- 4.0.24
+ 4.0.25
diff --git a/core/builder/pom.xml b/core/builder/pom.xml
index bcb4f3bcfd078..450e83a0ac9ec 100644
--- a/core/builder/pom.xml
+++ b/core/builder/pom.xml
@@ -25,6 +25,10 @@
org.wildfly.commonwildfly-common
+
+ io.smallrye.common
+ smallrye-common-constraint
+ org.jboss.loggingjboss-logging
diff --git a/core/builder/src/main/java/io/quarkus/builder/BuildChain.java b/core/builder/src/main/java/io/quarkus/builder/BuildChain.java
index 4e68cfd2d9e53..84d12ad767757 100644
--- a/core/builder/src/main/java/io/quarkus/builder/BuildChain.java
+++ b/core/builder/src/main/java/io/quarkus/builder/BuildChain.java
@@ -6,7 +6,7 @@
import java.util.ServiceLoader;
import java.util.Set;
-import org.wildfly.common.Assert;
+import io.smallrye.common.constraint.Assert;
/**
* A build chain.
diff --git a/core/builder/src/main/java/io/quarkus/builder/BuildChainBuilder.java b/core/builder/src/main/java/io/quarkus/builder/BuildChainBuilder.java
index 7d72d220502cc..aa242d9647553 100644
--- a/core/builder/src/main/java/io/quarkus/builder/BuildChainBuilder.java
+++ b/core/builder/src/main/java/io/quarkus/builder/BuildChainBuilder.java
@@ -19,9 +19,8 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.BuildItem;
+import io.smallrye.common.constraint.Assert;
/**
* A build chain builder.
@@ -179,7 +178,7 @@ private Map> wireDependencies(Set entry : stepBuilder.getConsumes().entrySet()) {
final Consume consume = entry.getValue();
final ItemId id = entry.getKey();
- if (!consume.getFlags().contains(ConsumeFlag.OPTIONAL) && !id.isMulti()) {
+ if (!consume.flags().contains(ConsumeFlag.OPTIONAL) && !id.isMulti()) {
if (!initialIds.contains(id) && !allProduces.containsKey(id)) {
throw new ChainBuildException("No producers for required item " + id);
}
diff --git a/core/builder/src/main/java/io/quarkus/builder/BuildContext.java b/core/builder/src/main/java/io/quarkus/builder/BuildContext.java
index 4d2c23ee429f2..e6bd5a91d6177 100644
--- a/core/builder/src/main/java/io/quarkus/builder/BuildContext.java
+++ b/core/builder/src/main/java/io/quarkus/builder/BuildContext.java
@@ -12,13 +12,12 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.diag.Diagnostic;
import io.quarkus.builder.item.BuildItem;
import io.quarkus.builder.item.MultiBuildItem;
import io.quarkus.builder.item.SimpleBuildItem;
import io.quarkus.builder.location.Location;
+import io.smallrye.common.constraint.Assert;
/**
* The context passed to a deployer's operation.
diff --git a/core/builder/src/main/java/io/quarkus/builder/BuildException.java b/core/builder/src/main/java/io/quarkus/builder/BuildException.java
index cef749bdd724c..4bd8cdd0bcd30 100644
--- a/core/builder/src/main/java/io/quarkus/builder/BuildException.java
+++ b/core/builder/src/main/java/io/quarkus/builder/BuildException.java
@@ -3,9 +3,8 @@
import java.util.Collections;
import java.util.List;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.diag.Diagnostic;
+import io.smallrye.common.constraint.Assert;
/**
* @author David M. Lloyd
diff --git a/core/builder/src/main/java/io/quarkus/builder/BuildExecutionBuilder.java b/core/builder/src/main/java/io/quarkus/builder/BuildExecutionBuilder.java
index de947a13fb98d..90a4d0b57df0c 100644
--- a/core/builder/src/main/java/io/quarkus/builder/BuildExecutionBuilder.java
+++ b/core/builder/src/main/java/io/quarkus/builder/BuildExecutionBuilder.java
@@ -6,9 +6,8 @@
import java.util.List;
import java.util.Map;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.BuildItem;
+import io.smallrye.common.constraint.Assert;
/**
* A builder for a deployer execution.
diff --git a/core/builder/src/main/java/io/quarkus/builder/BuildStepBuilder.java b/core/builder/src/main/java/io/quarkus/builder/BuildStepBuilder.java
index a873488c897c6..75431f9b6e694 100644
--- a/core/builder/src/main/java/io/quarkus/builder/BuildStepBuilder.java
+++ b/core/builder/src/main/java/io/quarkus/builder/BuildStepBuilder.java
@@ -6,9 +6,8 @@
import java.util.Set;
import java.util.function.BooleanSupplier;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.BuildItem;
+import io.smallrye.common.constraint.Assert;
/**
* A builder for build step instances within a chain. A build step can consume and produce items. It may also register
@@ -236,7 +235,7 @@ Map getProduces() {
Set getRealConsumes() {
final HashMap map = new HashMap<>(consumes);
- map.entrySet().removeIf(e -> e.getValue().getConstraint() == Constraint.ORDER_ONLY);
+ map.entrySet().removeIf(e -> e.getValue().constraint() == Constraint.ORDER_ONLY);
return map.keySet();
}
diff --git a/core/builder/src/main/java/io/quarkus/builder/Consume.java b/core/builder/src/main/java/io/quarkus/builder/Consume.java
index d64980fa102de..806bf8bdcb18a 100644
--- a/core/builder/src/main/java/io/quarkus/builder/Consume.java
+++ b/core/builder/src/main/java/io/quarkus/builder/Consume.java
@@ -1,42 +1,20 @@
package io.quarkus.builder;
-final class Consume {
- private final BuildStepBuilder buildStepBuilder;
- private final ItemId itemId;
- private final Constraint constraint;
- private final ConsumeFlags flags;
+import static io.quarkus.builder.Constraint.ORDER_ONLY;
+import static io.quarkus.builder.Constraint.REAL;
+import static io.quarkus.builder.ConsumeFlag.OPTIONAL;
- Consume(final BuildStepBuilder buildStepBuilder, final ItemId itemId, final Constraint constraint,
- final ConsumeFlags flags) {
- this.buildStepBuilder = buildStepBuilder;
- this.itemId = itemId;
- this.constraint = constraint;
- this.flags = flags;
- }
-
- BuildStepBuilder getBuildStepBuilder() {
- return buildStepBuilder;
- }
-
- ItemId getItemId() {
- return itemId;
- }
-
- ConsumeFlags getFlags() {
- return flags;
- }
+record Consume(BuildStepBuilder buildStepBuilder, ItemId itemId, Constraint constraint, ConsumeFlags flags) {
Consume combine(final Constraint constraint, final ConsumeFlags flags) {
- final Constraint outputConstraint = constraint == Constraint.REAL || this.constraint == Constraint.REAL
- ? Constraint.REAL
- : Constraint.ORDER_ONLY;
- final ConsumeFlags outputFlags = !flags.contains(ConsumeFlag.OPTIONAL) || !this.flags.contains(ConsumeFlag.OPTIONAL)
- ? flags.with(this.flags).without(ConsumeFlag.OPTIONAL)
- : flags.with(this.flags);
- return new Consume(buildStepBuilder, itemId, outputConstraint, outputFlags);
- }
-
- Constraint getConstraint() {
- return constraint;
+ return new Consume(
+ buildStepBuilder,
+ itemId,
+ constraint == REAL || this.constraint == REAL
+ ? REAL
+ : ORDER_ONLY,
+ !flags.contains(OPTIONAL) || !this.flags.contains(OPTIONAL)
+ ? flags.with(this.flags).without(OPTIONAL)
+ : flags.with(this.flags));
}
}
diff --git a/core/builder/src/main/java/io/quarkus/builder/ConsumeFlags.java b/core/builder/src/main/java/io/quarkus/builder/ConsumeFlags.java
index 4ab63939cf045..290547df67d62 100644
--- a/core/builder/src/main/java/io/quarkus/builder/ConsumeFlags.java
+++ b/core/builder/src/main/java/io/quarkus/builder/ConsumeFlags.java
@@ -1,8 +1,9 @@
package io.quarkus.builder;
-import org.wildfly.common.Assert;
import org.wildfly.common.flags.Flags;
+import io.smallrye.common.constraint.Assert;
+
/**
* Flags which can be set on consume declarations.
*/
diff --git a/core/builder/src/main/java/io/quarkus/builder/ItemId.java b/core/builder/src/main/java/io/quarkus/builder/ItemId.java
index 2e934ccefcacd..bbcfed998cad7 100644
--- a/core/builder/src/main/java/io/quarkus/builder/ItemId.java
+++ b/core/builder/src/main/java/io/quarkus/builder/ItemId.java
@@ -2,10 +2,9 @@
import java.util.Objects;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.BuildItem;
import io.quarkus.builder.item.MultiBuildItem;
+import io.smallrye.common.constraint.Assert;
final class ItemId {
private final Class extends BuildItem> itemType;
diff --git a/core/builder/src/main/java/io/quarkus/builder/ProduceFlags.java b/core/builder/src/main/java/io/quarkus/builder/ProduceFlags.java
index 8f8a8cf4e3488..c0a1ba046f2f1 100644
--- a/core/builder/src/main/java/io/quarkus/builder/ProduceFlags.java
+++ b/core/builder/src/main/java/io/quarkus/builder/ProduceFlags.java
@@ -1,8 +1,9 @@
package io.quarkus.builder;
-import org.wildfly.common.Assert;
import org.wildfly.common.flags.Flags;
+import io.smallrye.common.constraint.Assert;
+
/**
* Flags which can be set on consume declarations.
*/
diff --git a/core/builder/src/main/java/io/quarkus/builder/diag/Diagnostic.java b/core/builder/src/main/java/io/quarkus/builder/diag/Diagnostic.java
index bc14469557768..34ff41eeea9bd 100644
--- a/core/builder/src/main/java/io/quarkus/builder/diag/Diagnostic.java
+++ b/core/builder/src/main/java/io/quarkus/builder/diag/Diagnostic.java
@@ -5,9 +5,8 @@
import java.io.StringWriter;
import java.io.Writer;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.location.Location;
+import io.smallrye.common.constraint.Assert;
public final class Diagnostic {
private final Level level;
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/BootstrapConfig.java b/core/deployment/src/main/java/io/quarkus/deployment/BootstrapConfig.java
index 2ccf7e384f420..9f34b56ad7005 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/BootstrapConfig.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/BootstrapConfig.java
@@ -1,5 +1,6 @@
package io.quarkus.deployment;
+import io.quarkus.bootstrap.model.ApplicationModel;
import io.quarkus.runtime.annotations.ConfigPhase;
import io.quarkus.runtime.annotations.ConfigRoot;
import io.smallrye.config.ConfigMapping;
@@ -47,13 +48,13 @@ public interface BootstrapConfig {
boolean disableJarCache();
/**
- * A temporary option introduced to avoid a logging warning when {@code -Dquarkus.bootstrap.incubating-model-resolver}
+ * A temporary option introduced to avoid a logging warning when {@code -Dquarkus.bootstrap.legacy-model-resolver}
* is added to the build command line.
- * This option enables an incubating implementation of the Quarkus Application Model resolver.
- * This option will be removed as soon as the incubating implementation becomes the default one.
+ * This option enables the legacy implementation of the Quarkus Application Model resolver.
+ * This option will be removed once the legacy {@link ApplicationModel} resolver implementation gets removed.
*/
@WithDefault("false")
- boolean incubatingModelResolver();
+ boolean legacyModelResolver();
/**
* Whether to throw an error, warn or silently ignore misaligned platform BOM imports
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/Capabilities.java b/core/deployment/src/main/java/io/quarkus/deployment/Capabilities.java
index 1780bb3010b8a..f80dccea5d0a5 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/Capabilities.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/Capabilities.java
@@ -35,12 +35,6 @@ public Set getCapabilities() {
return capabilities;
}
- // @deprecated in 1.14.0.Final
- @Deprecated
- public boolean isCapabilityPresent(String capability) {
- return isPresent(capability);
- }
-
/**
* Checks whether a given capability is present during the build.
*
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/CodeGenProvider.java b/core/deployment/src/main/java/io/quarkus/deployment/CodeGenProvider.java
index c06f0277f2031..8fbb056302d81 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/CodeGenProvider.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/CodeGenProvider.java
@@ -6,7 +6,6 @@
import java.util.Objects;
import org.eclipse.microprofile.config.Config;
-import org.wildfly.common.annotation.NotNull;
import io.quarkus.bootstrap.model.ApplicationModel;
import io.quarkus.bootstrap.prebuild.CodeGenException;
@@ -18,7 +17,6 @@ public interface CodeGenProvider {
/**
* @return unique name of the code gen provider, will correspond to the directory in generated-sources
*/
- @NotNull
String providerId();
/**
@@ -37,7 +35,6 @@ default String inputExtension() {
*
* @return file extensions
*/
- @NotNull
default String[] inputExtensions() {
if (inputExtension() != null) {
return new String[] { inputExtension() };
@@ -53,7 +50,6 @@ default String[] inputExtensions() {
*
* @return the input directory
*/
- @NotNull
String inputDirectory();
/**
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/builditem/ConfigurationTypeBuildItem.java b/core/deployment/src/main/java/io/quarkus/deployment/builditem/ConfigurationTypeBuildItem.java
index 0820564b4af8d..85a162ee56faa 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/builditem/ConfigurationTypeBuildItem.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/builditem/ConfigurationTypeBuildItem.java
@@ -1,8 +1,7 @@
package io.quarkus.deployment.builditem;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.MultiBuildItem;
+import io.smallrye.common.constraint.Assert;
/**
* The configuration type build item. Every configuration type should be registered using this build item
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogCategoryBuildItem.java b/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogCategoryBuildItem.java
index 327a92d44be2c..5f97c5efcc86d 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogCategoryBuildItem.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogCategoryBuildItem.java
@@ -2,9 +2,8 @@
import java.util.logging.Level;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.MultiBuildItem;
+import io.smallrye.common.constraint.Assert;
/**
* Establish the default log level of a log category.
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogConsoleFormatBuildItem.java b/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogConsoleFormatBuildItem.java
index a30522477735a..1ded7eb6ade63 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogConsoleFormatBuildItem.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogConsoleFormatBuildItem.java
@@ -3,10 +3,9 @@
import java.util.Optional;
import java.util.logging.Formatter;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.MultiBuildItem;
import io.quarkus.runtime.RuntimeValue;
+import io.smallrye.common.constraint.Assert;
/**
* The log console format build item. Producing this item will cause the logging subsystem to disregard its
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogFileFormatBuildItem.java b/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogFileFormatBuildItem.java
index 2547b409e8984..cdb79e2b0401b 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogFileFormatBuildItem.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogFileFormatBuildItem.java
@@ -3,10 +3,9 @@
import java.util.Optional;
import java.util.logging.Formatter;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.MultiBuildItem;
import io.quarkus.runtime.RuntimeValue;
+import io.smallrye.common.constraint.Assert;
/**
* The log file format build item. Producing this item will cause the logging subsystem to disregard its
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogHandlerBuildItem.java b/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogHandlerBuildItem.java
index 963c17912dcc8..4cecbea94c652 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogHandlerBuildItem.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogHandlerBuildItem.java
@@ -3,10 +3,9 @@
import java.util.Optional;
import java.util.logging.Handler;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.MultiBuildItem;
import io.quarkus.runtime.RuntimeValue;
+import io.smallrye.common.constraint.Assert;
/**
* A build item for adding additional logging handlers.
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogSocketFormatBuildItem.java b/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogSocketFormatBuildItem.java
index 23aeaf109b955..519849e0d1400 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogSocketFormatBuildItem.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogSocketFormatBuildItem.java
@@ -3,10 +3,9 @@
import java.util.Optional;
import java.util.logging.Formatter;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.MultiBuildItem;
import io.quarkus.runtime.RuntimeValue;
+import io.smallrye.common.constraint.Assert;
/**
* The socket format build item. Producing this item will cause the logging subsystem to disregard its
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogSyslogFormatBuildItem.java b/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogSyslogFormatBuildItem.java
index f20812149a246..fb23437d10471 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogSyslogFormatBuildItem.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/builditem/LogSyslogFormatBuildItem.java
@@ -3,10 +3,9 @@
import java.util.Optional;
import java.util.logging.Formatter;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.MultiBuildItem;
import io.quarkus.runtime.RuntimeValue;
+import io.smallrye.common.constraint.Assert;
/**
* The syslog format build item. Producing this item will cause the logging subsystem to disregard its
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/builditem/NamedLogHandlersBuildItem.java b/core/deployment/src/main/java/io/quarkus/deployment/builditem/NamedLogHandlersBuildItem.java
index 6764f740726de..e6f7b7e3dca93 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/builditem/NamedLogHandlersBuildItem.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/builditem/NamedLogHandlersBuildItem.java
@@ -3,10 +3,9 @@
import java.util.Map;
import java.util.logging.Handler;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.MultiBuildItem;
import io.quarkus.runtime.RuntimeValue;
+import io.smallrye.common.constraint.Assert;
/**
* A build item for adding additional named logging handlers.
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/builditem/RunTimeConfigurationDefaultBuildItem.java b/core/deployment/src/main/java/io/quarkus/deployment/builditem/RunTimeConfigurationDefaultBuildItem.java
index aecb5c9186fbf..24e7f142a1a21 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/builditem/RunTimeConfigurationDefaultBuildItem.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/builditem/RunTimeConfigurationDefaultBuildItem.java
@@ -1,9 +1,8 @@
package io.quarkus.deployment.builditem;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.MultiBuildItem;
import io.quarkus.runtime.annotations.ConfigItem;
+import io.smallrye.common.constraint.Assert;
/**
* A build item which specifies a configuration default value for run time, which is used to establish a default other
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/builditem/StreamingLogHandlerBuildItem.java b/core/deployment/src/main/java/io/quarkus/deployment/builditem/StreamingLogHandlerBuildItem.java
index 5271f0e8f11de..68688e60f973c 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/builditem/StreamingLogHandlerBuildItem.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/builditem/StreamingLogHandlerBuildItem.java
@@ -3,10 +3,9 @@
import java.util.Optional;
import java.util.logging.Handler;
-import org.wildfly.common.Assert;
-
import io.quarkus.builder.item.SimpleBuildItem;
import io.quarkus.runtime.RuntimeValue;
+import io.smallrye.common.constraint.Assert;
/**
* A build item for adding the dev stream log via mutiny.
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/configuration/BuildTimeConfigurationReader.java b/core/deployment/src/main/java/io/quarkus/deployment/configuration/BuildTimeConfigurationReader.java
index 1d3af46be5a0a..c319892a9da0f 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/configuration/BuildTimeConfigurationReader.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/configuration/BuildTimeConfigurationReader.java
@@ -37,7 +37,6 @@
import org.eclipse.microprofile.config.spi.ConfigSource;
import org.eclipse.microprofile.config.spi.Converter;
import org.jboss.logging.Logger;
-import org.wildfly.common.Assert;
import io.quarkus.deployment.configuration.definition.ClassDefinition;
import io.quarkus.deployment.configuration.definition.ClassDefinition.ClassMember;
@@ -72,6 +71,7 @@
import io.quarkus.runtime.configuration.HyphenateEnumConverter;
import io.quarkus.runtime.configuration.NameIterator;
import io.quarkus.runtime.configuration.PropertiesUtil;
+import io.smallrye.common.constraint.Assert;
import io.smallrye.config.ConfigMapping;
import io.smallrye.config.ConfigMappings;
import io.smallrye.config.ConfigMappings.ConfigClass;
@@ -651,7 +651,7 @@ ReadResult run() {
}
if (runTimeNames.contains(name)) {
unknownBuildProperties.remove(property);
- ConfigValue value = runtimeConfig.getConfigValue(property);
+ ConfigValue value = withoutExpansion(() -> runtimeConfig.getConfigValue(property));
if (value.getRawValue() != null) {
runTimeValues.put(value.getNameProfiled(), value.noProblems().withValue(value.getRawValue()));
}
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/configuration/ClassLoadingConfig.java b/core/deployment/src/main/java/io/quarkus/deployment/configuration/ClassLoadingConfig.java
index 3eb7b693f46ec..ae14536df79d6 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/configuration/ClassLoadingConfig.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/configuration/ClassLoadingConfig.java
@@ -6,9 +6,9 @@
import java.util.Set;
import io.quarkus.runtime.annotations.ConfigDocMapKey;
-import io.quarkus.runtime.annotations.ConfigItem;
import io.quarkus.runtime.annotations.ConfigPhase;
import io.quarkus.runtime.annotations.ConfigRoot;
+import io.smallrye.config.ConfigMapping;
/**
* Class loading
@@ -18,7 +18,8 @@
* This is because it is needed before any of the config infrastructure is set up.
*/
@ConfigRoot(phase = ConfigPhase.BUILD_TIME)
-public class ClassLoadingConfig {
+@ConfigMapping(prefix = "quarkus.class-loading")
+public interface ClassLoadingConfig {
/**
* Artifacts that are loaded in a parent first manner. This can be used to work around issues where a given
@@ -30,8 +31,7 @@ public class ClassLoadingConfig {
*
* WARNING: This config property can only be set in application.properties
*/
- @ConfigItem(defaultValue = "")
- public Optional> parentFirstArtifacts;
+ Optional> parentFirstArtifacts();
/**
* Artifacts that are loaded in the runtime ClassLoader in dev mode, so they will be dropped
@@ -48,15 +48,13 @@ public class ClassLoadingConfig {
*
* WARNING: This config property can only be set in application.properties
*/
- @ConfigItem(defaultValue = "")
- public Optional reloadableArtifacts;
+ Optional reloadableArtifacts();
/**
* Artifacts that will never be loaded by the class loader, and will not be packed into the final application. This allows
* you to explicitly remove artifacts from your application even though they may be present on the class path.
*/
- @ConfigItem(defaultValue = "")
- public Optional> removedArtifacts;
+ Optional> removedArtifacts();
/**
* Resources that should be removed/hidden from dependencies.
@@ -73,8 +71,7 @@ public class ClassLoadingConfig {
*
* Note that for technical reasons this is not supported when running with JBang.
*/
- @ConfigItem
@ConfigDocMapKey("group-id:artifact-id")
- public Map> removedResources;
+ Map> removedResources();
}
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/configuration/RunTimeConfigurationGenerator.java b/core/deployment/src/main/java/io/quarkus/deployment/configuration/RunTimeConfigurationGenerator.java
index 5c7089a68ec5e..9288e8f7127f0 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/configuration/RunTimeConfigurationGenerator.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/configuration/RunTimeConfigurationGenerator.java
@@ -24,7 +24,6 @@
import org.eclipse.microprofile.config.spi.ConfigBuilder;
import org.eclipse.microprofile.config.spi.Converter;
import org.objectweb.asm.Opcodes;
-import org.wildfly.common.Assert;
import io.quarkus.deployment.AccessorFinder;
import io.quarkus.deployment.configuration.definition.ClassDefinition;
@@ -64,6 +63,7 @@
import io.quarkus.runtime.configuration.NameIterator;
import io.quarkus.runtime.configuration.PropertiesUtil;
import io.quarkus.runtime.configuration.QuarkusConfigFactory;
+import io.smallrye.common.constraint.Assert;
import io.smallrye.config.ConfigMappings;
import io.smallrye.config.ConfigMappings.ConfigClass;
import io.smallrye.config.Converters;
@@ -99,10 +99,10 @@ public final class RunTimeConfigurationGenerator {
static final MethodDescriptor CD_MISSING_VALUE = MethodDescriptor.ofMethod(ConfigDiagnostic.class, "missingValue",
void.class, String.class, NoSuchElementException.class);
static final MethodDescriptor CD_RESET_ERROR = MethodDescriptor.ofMethod(ConfigDiagnostic.class, "resetError", void.class);
- static final MethodDescriptor CD_UNKNOWN_PROPERTIES = MethodDescriptor.ofMethod(ConfigDiagnostic.class, "unknownProperties",
+ static final MethodDescriptor CD_REPORT_UNKNOWN = MethodDescriptor.ofMethod(ConfigDiagnostic.class, "reportUnknown",
void.class, Set.class);
- static final MethodDescriptor CD_UNKNOWN_PROPERTIES_RT = MethodDescriptor.ofMethod(ConfigDiagnostic.class,
- "unknownPropertiesRuntime", void.class, Set.class);
+ static final MethodDescriptor CD_REPORT_UNKNOWN_RUNTIME = MethodDescriptor.ofMethod(ConfigDiagnostic.class,
+ "reportUnknownRuntime", void.class, Set.class);
static final MethodDescriptor CONVS_NEW_ARRAY_CONVERTER = MethodDescriptor.ofMethod(Converters.class,
"newArrayConverter", Converter.class, Converter.class, Class.class);
@@ -449,14 +449,14 @@ public void run() {
// generate sweep for clinit
configSweepLoop(siParserBody, clinit, clinitConfig, getRegisteredRoots(BUILD_AND_RUN_TIME_FIXED), Type.BUILD_TIME);
- clinit.invokeStaticMethod(CD_UNKNOWN_PROPERTIES, clinit.readStaticField(C_UNKNOWN));
+ clinit.invokeStaticMethod(CD_REPORT_UNKNOWN, clinit.readStaticField(C_UNKNOWN));
if (liveReloadPossible) {
configSweepLoop(siParserBody, readConfig, runTimeConfig, getRegisteredRoots(RUN_TIME), Type.RUNTIME);
}
// generate sweep for run time
configSweepLoop(rtParserBody, readConfig, runTimeConfig, getRegisteredRoots(RUN_TIME), Type.RUNTIME);
- readConfig.invokeStaticMethod(CD_UNKNOWN_PROPERTIES_RT, readConfig.readStaticField(C_UNKNOWN_RUNTIME));
+ readConfig.invokeStaticMethod(CD_REPORT_UNKNOWN_RUNTIME, readConfig.readStaticField(C_UNKNOWN_RUNTIME));
// generate ensure-initialized method
// the point of this method is simply to initialize the Config class
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/configuration/definition/ClassDefinition.java b/core/deployment/src/main/java/io/quarkus/deployment/configuration/definition/ClassDefinition.java
index 33d1713e78f6a..45d7e802a2ffe 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/configuration/definition/ClassDefinition.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/configuration/definition/ClassDefinition.java
@@ -5,11 +5,10 @@
import java.util.LinkedHashMap;
import java.util.Map;
-import org.wildfly.common.Assert;
-
import io.quarkus.gizmo.FieldDescriptor;
import io.quarkus.runtime.annotations.ConfigItem;
import io.quarkus.runtime.util.StringUtil;
+import io.smallrye.common.constraint.Assert;
/**
*
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/configuration/definition/RootDefinition.java b/core/deployment/src/main/java/io/quarkus/deployment/configuration/definition/RootDefinition.java
index bd0122ab96e69..e5207265f6478 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/configuration/definition/RootDefinition.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/configuration/definition/RootDefinition.java
@@ -9,11 +9,10 @@
import java.util.List;
-import org.wildfly.common.Assert;
-
import io.quarkus.gizmo.FieldDescriptor;
import io.quarkus.runtime.annotations.ConfigItem;
import io.quarkus.runtime.annotations.ConfigPhase;
+import io.smallrye.common.constraint.Assert;
/**
*
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/configuration/matching/ConfigPatternMap.java b/core/deployment/src/main/java/io/quarkus/deployment/configuration/matching/ConfigPatternMap.java
index fbf525fc1c6bf..b0d941ad66d02 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/configuration/matching/ConfigPatternMap.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/configuration/matching/ConfigPatternMap.java
@@ -6,9 +6,8 @@
import java.util.TreeMap;
import java.util.function.BiFunction;
-import org.wildfly.common.Assert;
-
import io.quarkus.runtime.configuration.NameIterator;
+import io.smallrye.common.constraint.Assert;
/**
* A pattern-matching mapping of configuration key pattern to value.
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/configuration/matching/FieldContainer.java b/core/deployment/src/main/java/io/quarkus/deployment/configuration/matching/FieldContainer.java
index 83235050cf98b..2b306bbf6510c 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/configuration/matching/FieldContainer.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/configuration/matching/FieldContainer.java
@@ -1,9 +1,8 @@
package io.quarkus.deployment.configuration.matching;
-import org.wildfly.common.Assert;
-
import io.quarkus.deployment.configuration.definition.ClassDefinition;
import io.quarkus.deployment.configuration.definition.RootDefinition;
+import io.smallrye.common.constraint.Assert;
/**
*
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/configuration/matching/MapContainer.java b/core/deployment/src/main/java/io/quarkus/deployment/configuration/matching/MapContainer.java
index 9360e81635e07..126b6dae51415 100644
--- a/core/deployment/src/main/java/io/quarkus/deployment/configuration/matching/MapContainer.java
+++ b/core/deployment/src/main/java/io/quarkus/deployment/configuration/matching/MapContainer.java
@@ -1,8 +1,7 @@
package io.quarkus.deployment.configuration.matching;
-import org.wildfly.common.Assert;
-
import io.quarkus.deployment.configuration.definition.ClassDefinition;
+import io.smallrye.common.constraint.Assert;
/**
* A map container.
diff --git a/core/deployment/src/main/java/io/quarkus/deployment/dev/testing/AppMakerHelper.java b/core/deployment/src/main/java/io/quarkus/deployment/dev/testing/AppMakerHelper.java
new file mode 100644
index 0000000000000..04b666f3335b8
--- /dev/null
+++ b/core/deployment/src/main/java/io/quarkus/deployment/dev/testing/AppMakerHelper.java
@@ -0,0 +1,362 @@
+package io.quarkus.deployment.dev.testing;
+
+import static io.quarkus.test.common.PathTestHelper.getAppClassLocationForTestLocation;
+import static io.quarkus.test.common.PathTestHelper.getTestClassesLocation;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+
+import jakarta.enterprise.inject.Alternative;
+
+import org.jboss.jandex.Index;
+
+import io.quarkus.bootstrap.BootstrapConstants;
+import io.quarkus.bootstrap.BootstrapException;
+import io.quarkus.bootstrap.app.AugmentAction;
+import io.quarkus.bootstrap.app.CuratedApplication;
+import io.quarkus.bootstrap.app.QuarkusBootstrap;
+import io.quarkus.bootstrap.app.StartupAction;
+import io.quarkus.bootstrap.model.ApplicationModel;
+import io.quarkus.bootstrap.resolver.AppModelResolverException;
+import io.quarkus.bootstrap.runner.Timing;
+import io.quarkus.bootstrap.utils.BuildToolHelper;
+import io.quarkus.bootstrap.workspace.ArtifactSources;
+import io.quarkus.bootstrap.workspace.SourceDir;
+import io.quarkus.bootstrap.workspace.WorkspaceModule;
+import io.quarkus.commons.classloading.ClassLoaderHelper;
+import io.quarkus.paths.PathList;
+import io.quarkus.runtime.LaunchMode;
+import io.quarkus.test.common.PathTestHelper;
+import io.quarkus.test.common.RestorableSystemProperties;
+import io.quarkus.test.junit.QuarkusTestProfile;
+import io.quarkus.test.junit.TestBuildChainFunction;
+
+public class AppMakerHelper {
+
+ // Copied from superclass of thing we copied
+ protected static final String TEST_LOCATION = "test-location";
+ protected static final String TEST_CLASS = "test-class";
+ protected static final String TEST_PROFILE = "test-profile";
+ /// end copied
+
+ private static Class> quarkusTestMethodContextClass;
+ private static boolean hasPerTestResources;
+
+ private static List
+
+ io.quarkus
+ quarkus-micrometer-opentelemetry
+ ${project.version}
+ pom
+ test
+
+
+ *
+ *
+
+
+ io.quarkusquarkus-micrometer-registry-prometheus
@@ -1864,6 +1877,19 @@
+
+ io.quarkus
+ quarkus-oidc-token-propagation-common
+ ${project.version}
+ pom
+ test
+
+
+ *
+ *
+
+
+ io.quarkusquarkus-openshift
diff --git a/devtools/gradle/gradle/libs.versions.toml b/devtools/gradle/gradle/libs.versions.toml
index c9af6e44aa83f..d5d736e288b29 100644
--- a/devtools/gradle/gradle/libs.versions.toml
+++ b/devtools/gradle/gradle/libs.versions.toml
@@ -1,9 +1,9 @@
[versions]
-plugin-publish = "1.3.0"
+plugin-publish = "1.3.1"
# updating Kotlin here makes QuarkusPluginTest > shouldNotFailOnProjectDependenciesWithoutMain(Path) fail
kotlin = "2.0.21"
-smallrye-config = "3.11.1"
+smallrye-config = "3.11.2"
junit5 = "5.10.5"
assertj = "3.27.3"
diff --git a/devtools/gradle/settings.gradle.kts b/devtools/gradle/settings.gradle.kts
index 6532acd5a053f..da5eaa3e276e4 100644
--- a/devtools/gradle/settings.gradle.kts
+++ b/devtools/gradle/settings.gradle.kts
@@ -1,5 +1,5 @@
plugins {
- id("com.gradle.develocity") version "3.19"
+ id("com.gradle.develocity") version "3.19.1"
}
develocity {
diff --git a/devtools/maven/src/main/java/io/quarkus/maven/DependencySbomMojo.java b/devtools/maven/src/main/java/io/quarkus/maven/DependencySbomMojo.java
index eec9de1fabb90..049be7d61d1e8 100644
--- a/devtools/maven/src/main/java/io/quarkus/maven/DependencySbomMojo.java
+++ b/devtools/maven/src/main/java/io/quarkus/maven/DependencySbomMojo.java
@@ -20,7 +20,6 @@
import io.quarkus.bootstrap.resolver.BootstrapAppModelResolver;
import io.quarkus.bootstrap.resolver.maven.BootstrapMavenContext;
import io.quarkus.bootstrap.resolver.maven.EffectiveModelResolver;
-import io.quarkus.bootstrap.resolver.maven.IncubatingApplicationModelResolver;
import io.quarkus.bootstrap.resolver.maven.MavenArtifactResolver;
import io.quarkus.cyclonedx.generator.CycloneDxSbomGenerator;
import io.quarkus.maven.components.QuarkusWorkspaceProvider;
@@ -131,9 +130,7 @@ private ApplicationModel resolveApplicationModel()
"Parameter 'mode' was set to '" + mode + "' while expected one of 'dev', 'test' or 'prod'");
}
}
- // enable the incubating model resolver impl by default for this mojo
- modelResolver.setIncubatingModelResolver(
- !IncubatingApplicationModelResolver.isIncubatingModelResolverProperty(project.getProperties(), "false"));
+ modelResolver.setLegacyModelResolver(BootstrapAppModelResolver.isLegacyModelResolver(project.getProperties()));
return modelResolver.resolveModel(appArtifact);
} catch (Exception e) {
throw new MojoExecutionException("Failed to resolve application model " + appArtifact + " dependencies", e);
diff --git a/devtools/maven/src/main/java/io/quarkus/maven/DependencyTreeMojo.java b/devtools/maven/src/main/java/io/quarkus/maven/DependencyTreeMojo.java
index 7de487b87c0de..89f2223b14afb 100644
--- a/devtools/maven/src/main/java/io/quarkus/maven/DependencyTreeMojo.java
+++ b/devtools/maven/src/main/java/io/quarkus/maven/DependencyTreeMojo.java
@@ -24,7 +24,6 @@
import io.quarkus.bootstrap.resolver.BootstrapAppModelResolver;
import io.quarkus.bootstrap.resolver.maven.BootstrapMavenContext;
import io.quarkus.bootstrap.resolver.maven.DependencyLoggingConfig;
-import io.quarkus.bootstrap.resolver.maven.IncubatingApplicationModelResolver;
import io.quarkus.bootstrap.resolver.maven.MavenArtifactResolver;
import io.quarkus.maven.components.QuarkusWorkspaceProvider;
import io.quarkus.maven.dependency.ArtifactCoords;
@@ -55,7 +54,7 @@ public class DependencyTreeMojo extends AbstractMojo {
String mode;
/**
- * INCUBATING option, enabled with @{code -Dquarkus.bootstrap.incubating-model-resolver} system or project property.
+ * INCUBATING option, enabled with {@code -Dquarkus.bootstrap.incubating-model-resolver} system or project property.
*
* Whether to log dependency properties, such as on which classpath they belong, whether they are hot-reloadable in dev
* mode, etc.
@@ -64,7 +63,7 @@ public class DependencyTreeMojo extends AbstractMojo {
boolean verbose;
/**
- * INCUBATING option, enabled with @{code -Dquarkus.bootstrap.incubating-model-resolver} system or project property.
+ * INCUBATING option, enabled with {@code -Dquarkus.bootstrap.incubating-model-resolver} system or project property.
*
* Whether to log all dependencies of each dependency node in a tree, adding {@code [+]} suffix
* to those whose dependencies are not expanded.
@@ -154,9 +153,7 @@ private void logTree(final Consumer log) throws MojoExecutionException {
"Parameter 'mode' was set to '" + mode + "' while expected one of 'dev', 'test' or 'prod'");
}
}
- // enable the incubating model resolver impl by default for this mojo
- modelResolver.setIncubatingModelResolver(
- !IncubatingApplicationModelResolver.isIncubatingModelResolverProperty(project.getProperties(), "false"));
+ modelResolver.setLegacyModelResolver(BootstrapAppModelResolver.isLegacyModelResolver(project.getProperties()));
modelResolver.setDepLogConfig(DependencyLoggingConfig.builder()
.setMessageConsumer(log)
.setVerbose(verbose)
diff --git a/devtools/maven/src/main/java/io/quarkus/maven/DevMojo.java b/devtools/maven/src/main/java/io/quarkus/maven/DevMojo.java
index b4c61c4377408..99da1770f7cf1 100644
--- a/devtools/maven/src/main/java/io/quarkus/maven/DevMojo.java
+++ b/devtools/maven/src/main/java/io/quarkus/maven/DevMojo.java
@@ -97,7 +97,6 @@
import io.quarkus.bootstrap.resolver.BootstrapAppModelResolver;
import io.quarkus.bootstrap.resolver.maven.BootstrapMavenContext;
import io.quarkus.bootstrap.resolver.maven.BootstrapMavenContextConfig;
-import io.quarkus.bootstrap.resolver.maven.IncubatingApplicationModelResolver;
import io.quarkus.bootstrap.resolver.maven.MavenArtifactResolver;
import io.quarkus.bootstrap.util.BootstrapUtils;
import io.quarkus.bootstrap.workspace.ArtifactSources;
@@ -151,6 +150,7 @@ public class DevMojo extends AbstractMojo {
private static final String ORG_APACHE_MAVEN_PLUGINS = "org.apache.maven.plugins";
private static final String MAVEN_COMPILER_PLUGIN = "maven-compiler-plugin";
private static final String MAVEN_RESOURCES_PLUGIN = "maven-resources-plugin";
+ private static final String MAVEN_SUREFIRE_PLUGIN = "maven-surefire-plugin";
private static final String MAVEN_TOOLCHAINS_PLUGIN = "maven-toolchains-plugin";
private static final String ORG_JETBRAINS_KOTLIN = "org.jetbrains.kotlin";
@@ -263,6 +263,22 @@ public class DevMojo extends AbstractMojo {
@Parameter
private Map systemProperties = Map.of();
+ /**
+ * When enabled, the {@code <environmentVariables>} and {@code <systemPropertyVariables>}
+ * elements of the Maven Surefire plugin are copied to environment variables and system
+ * properties defined by this plugin. Note that no other Surefire configuration is used
+ * (notably {@code <argLine>}), only the 2 elements mentioned above.
+ *
+ * This plugin's {@code <environmentVariables>} and {@code <systemPropertyVariables>} have
+ * priority, so duplicate keys are not copied.
+ *
+
+ io.quarkus
+ quarkus-micrometer-opentelemetry-deployment
+ ${project.version}
+ pom
+ test
+
+
+ *
+ *
+
+
+ io.quarkusquarkus-micrometer-registry-prometheus-deployment
@@ -1875,6 +1888,19 @@
+
+ io.quarkus
+ quarkus-oidc-token-propagation-common-deployment
+ ${project.version}
+ pom
+ test
+
+
+ *
+ *
+
+
+ io.quarkusquarkus-openshift-deployment
diff --git a/docs/src/main/asciidoc/_includes/snip-note-derby.adoc b/docs/src/main/asciidoc/_includes/snip-note-derby.adoc
index e69de29bb2d1d..4fdf7f13d66e0 100644
--- a/docs/src/main/asciidoc/_includes/snip-note-derby.adoc
+++ b/docs/src/main/asciidoc/_includes/snip-note-derby.adoc
@@ -0,0 +1,4 @@
+////
+This file is intentionally empty.
+It is used for compatibility with downstream systems, where this empty snippet is replaced with an equivalent that carries a required note.
+////
\ No newline at end of file
diff --git a/docs/src/main/asciidoc/_includes/snip-note-encrypted-pem-tech-prev.adoc b/docs/src/main/asciidoc/_includes/snip-note-encrypted-pem-tech-prev.adoc
new file mode 100644
index 0000000000000..4fdf7f13d66e0
--- /dev/null
+++ b/docs/src/main/asciidoc/_includes/snip-note-encrypted-pem-tech-prev.adoc
@@ -0,0 +1,4 @@
+////
+This file is intentionally empty.
+It is used for compatibility with downstream systems, where this empty snippet is replaced with an equivalent that carries a required note.
+////
\ No newline at end of file
diff --git a/docs/src/main/asciidoc/aws-lambda.adoc b/docs/src/main/asciidoc/aws-lambda.adoc
index ec05de3639b8b..7fe1087152132 100644
--- a/docs/src/main/asciidoc/aws-lambda.adoc
+++ b/docs/src/main/asciidoc/aws-lambda.adoc
@@ -586,7 +586,7 @@ To extract the required ssl, you must start up a Docker container in the backgro
First, let's start the GraalVM container, noting the container id output.
[source,bash,subs=attributes+]
----
-docker run -it -d --entrypoint bash quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor}
+docker run -it -d --entrypoint bash quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor}
# This will output a container id, like 6304eea6179522aff69acb38eca90bedfd4b970a5475aa37ccda3585bc2abdde
# Note this value as we will need it for the commands below
diff --git a/docs/src/main/asciidoc/building-native-image.adoc b/docs/src/main/asciidoc/building-native-image.adoc
index baa9be1b69fb9..523539ae17cd8 100644
--- a/docs/src/main/asciidoc/building-native-image.adoc
+++ b/docs/src/main/asciidoc/building-native-image.adoc
@@ -255,7 +255,7 @@ To see the `GreetingResourceIT` run against the native executable, use `./mvnw v
$ ./mvnw verify -Dnative
...
Finished generating 'getting-started-1.0.0-SNAPSHOT-runner' in 22.0s.
-[INFO] [io.quarkus.deployment.pkg.steps.NativeImageBuildRunner] docker run --env LANG=C --rm --user 1000:1000 -v /home/zakkak/code/quarkus-quickstarts/getting-started/target/getting-started-1.0.0-SNAPSHOT-native-image-source-jar:/project:z --entrypoint /bin/bash quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor} -c objcopy --strip-debug getting-started-1.0.0-SNAPSHOT-runner
+[INFO] [io.quarkus.deployment.pkg.steps.NativeImageBuildRunner] docker run --env LANG=C --rm --user 1000:1000 -v /home/zakkak/code/quarkus-quickstarts/getting-started/target/getting-started-1.0.0-SNAPSHOT-native-image-source-jar:/project:z --entrypoint /bin/bash quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor} -c objcopy --strip-debug getting-started-1.0.0-SNAPSHOT-runner
[INFO] [io.quarkus.deployment.QuarkusAugmentor] Quarkus augmentation completed in 70686ms
[INFO]
[INFO] --- maven-failsafe-plugin:3.0.0-M7:integration-test (default) @ getting-started ---
@@ -421,6 +421,25 @@ These are regular Quarkus config properties, so if you always want to build in a
it is recommended you add these to your `application.properties` in order to avoid specifying them every time.
====
+An executable built this way with the container runtime will be a 64-bit Linux executable, so depending on your operating system, it may not be runnable.
+
+[IMPORTANT]
+====
+Starting with Quarkus 3.19+, the _builder_ image used to build the native executable is based on UBI 9.
+It means that the native executable produced by the container build will be based on UBI 9 as well.
+So, if you plan to build a container, make sure that the base image in your `Dockerfile` is compatible with UBI 9.
+The native executable will not run on UBI 8 base images.
+
+You can configure the builder image used for the container build by setting the `quarkus.native.builder-image` property.
+For example, to switch back to a UBI 8 _builder image_, you can use:
+
+`quarkus.native.builder-image=quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor}`
+
+You can see the available tags for UBI 8 https://quay.io/repository/quarkus/ubi-quarkus-mandrel-builder-image?tab=tags[here]
+and for UBI 9 https://quay.io/repository/quarkus/ubi9-quarkus-mandrel-builder-image?tab=tags[here].
+
+====
+
[[tip-quarkus-native-remote-container-build]]
[TIP]
====
@@ -434,6 +453,7 @@ In this case, use the parameter `-Dquarkus.native.remote-container-build=true` i
The reason for this is that the local build driver invoked through `-Dquarkus.native.container-build=true` uses volume mounts to make the JAR available in the build container, but volume mounts do not work with remote daemons. The remote container build driver copies the necessary files instead of mounting them. Note that even though the remote driver also works with local daemons, the local driver should be preferred in the local case because mounting is usually more performant than copying.
====
+
[TIP]
====
Building with GraalVM instead of Mandrel requires a custom builder image parameter to be passed additionally:
@@ -446,7 +466,8 @@ Please note that the above command points to a floating tag.
It is highly recommended to use the floating tag,
so that your builder image remains up-to-date and secure.
If you absolutely must, you may hard-code to a specific tag
-(see https://quay.io/repository/quarkus/ubi-quarkus-mandrel-builder-image?tab=tags[here] for available tags),
+(see https://quay.io/repository/quarkus/ubi-quarkus-mandrel-builder-image?tab=tags[here (UBI 8)]
+and https://quay.io/repository/quarkus/ubi9-quarkus-mandrel-builder-image?tab=tags[here (UBI 9)] for available tags),
but be aware that you won't get security updates that way and it's unsupported.
====
@@ -493,25 +514,30 @@ The project generation has provided a `Dockerfile.native-micro` in the `src/main
[source,dockerfile]
----
-FROM quay.io/quarkus/quarkus-micro-image:2.0
+FROM quay.io/quarkus/ubi9-quarkus-micro-image:2.0
WORKDIR /work/
-COPY target/*-runner /work/application
-RUN chmod 775 /work
+RUN chown 1001 /work \
+ && chmod "g+rwX" /work \
+ && chown 1001:root /work
+COPY --chown=1001:root --chmod=755 target/*-runner /work/application
+
EXPOSE 8080
-CMD ["./application", "-Dquarkus.http.host=0.0.0.0"]
+USER 1001
+
+ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"]
----
[NOTE]
.Quarkus Micro Image?
====
The Quarkus Micro Image is a small container image providing the right set of dependencies to run your native application.
-It is based on https://catalog.redhat.com/software/containers/ubi8-micro/601a84aadd19c7786c47c8ea?container-tabs=overview[UBI Micro].
+It is based on https://catalog.redhat.com/software/containers/ubi9-micro/61832b36dd607bfc82e66399?container-tabs=overview[UBI Micro].
This base image has been tailored to work perfectly in containers.
You can read more about UBI images on:
* https://www.redhat.com/en/blog/introducing-red-hat-universal-base-image[Introduction to Universal Base Image]
-* https://catalog.redhat.com/software/container-stacks/detail/5ec53f50ef29fd35586d9a56[Red Hat Universal Base Image 8]
+* https://catalog.redhat.com/software/containers/ubi9/ubi/615bcf606feffc5384e8452e[Red Hat Universal Base Image 9]
UBI images can be used without any limitations.
@@ -538,12 +564,12 @@ The project generation has also provided a `Dockerfile.native` in the `src/main/
[source,dockerfile]
----
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10
+FROM registry.access.redhat.com/ubi9/ubi-minimal:9.5
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
-COPY --chown=1001:root target/*-runner /work/application
+COPY --chown=1001:root --chmod=0755 target/*-runner /work/application
EXPOSE 8080
USER 1001
@@ -578,18 +604,18 @@ Sample Dockerfile for building with Maven:
[source,dockerfile,subs=attributes+]
----
## Stage 1 : build with maven builder image with native capabilities
-FROM quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor} AS build
+FROM quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor} AS build
COPY --chown=quarkus:quarkus --chmod=0755 mvnw /code/mvnw
COPY --chown=quarkus:quarkus .mvn /code/.mvn
COPY --chown=quarkus:quarkus pom.xml /code/
USER quarkus
WORKDIR /code
-RUN ./mvnw -B org.apache.maven.plugins:maven-dependency-plugin:3.1.2:go-offline
+RUN ./mvnw -B org.apache.maven.plugins:maven-dependency-plugin:3.8.1:go-offline
COPY src /code/src
RUN ./mvnw package -Dnative
## Stage 2 : create the docker final image
-FROM quay.io/quarkus/quarkus-micro-image:2.0
+FROM quay.io/quarkus/ubi9-quarkus-micro-image:2.0
WORKDIR /work/
COPY --from=build /code/target/*-runner /work/application
@@ -616,7 +642,7 @@ Sample Dockerfile for building with Gradle:
[source,dockerfile,subs=attributes+]
----
## Stage 1 : build with maven builder image with native capabilities
-FROM quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor} AS build
+FROM quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor} AS build
USER root
RUN microdnf install findutils
COPY --chown=quarkus:quarkus gradlew /code/gradlew
@@ -630,7 +656,7 @@ COPY src /code/src
RUN ./gradlew build -Dquarkus.native.enabled=true
## Stage 2 : create the docker final image
-FROM quay.io/quarkus/quarkus-micro-image:2.0
+FROM quay.io/quarkus/ubi9-quarkus-micro-image:2.0
WORKDIR /work/
COPY --from=build /code/build/*-runner /work/application
RUN chmod 775 /work
@@ -661,7 +687,7 @@ Please see xref:native-and-ssl.adoc#working-with-containers[our Using SSL With N
[NOTE,subs=attributes+]
====
-To use GraalVM CE instead of Mandrel, update the `FROM` clause to: `FROM quay.io/quarkus/ubi-quarkus-graalvmce-builder-image:{graalvm-flavor} AS build`.
+To use GraalVM CE instead of Mandrel, update the `FROM` clause to: `FROM quay.io/quarkus/ubi9-quarkus-graalvmce-builder-image:{graalvm-flavor} AS build`.
====
=== Using a Distroless base image
@@ -702,7 +728,7 @@ Sample multistage Dockerfile for building an image from `scratch`:
[source,dockerfile,subs=attributes+]
----
## Stage 1 : build with maven builder image with native capabilities
-FROM quay.io/quarkus/ubi-quarkus-graalvmce-builder-image:{graalvm-flavor} AS build
+FROM quay.io/quarkus/ubi9-quarkus-graalvmce-builder-image:{graalvm-flavor} AS build
USER root
RUN microdnf install make gcc
COPY --chown=quarkus:quarkus mvnw /code/mvnw
@@ -719,7 +745,7 @@ RUN mkdir /musl && \
ENV PATH="/musl/bin:${PATH}"
USER quarkus
WORKDIR /code
-RUN ./mvnw -B org.apache.maven.plugins:maven-dependency-plugin:3.1.2:go-offline
+RUN ./mvnw -B org.apache.maven.plugins:maven-dependency-plugin:3.8.1:go-offline
COPY src /code/src
RUN ./mvnw package -Dnative -DskipTests -Dquarkus.native.additional-build-args="--static","--libc=musl"
diff --git a/docs/src/main/asciidoc/cdi-reference.adoc b/docs/src/main/asciidoc/cdi-reference.adoc
index cd714fcf13433..365e5069ababc 100644
--- a/docs/src/main/asciidoc/cdi-reference.adoc
+++ b/docs/src/main/asciidoc/cdi-reference.adoc
@@ -616,9 +616,9 @@ TIP: It is also possible to use `@IfBuildProfile` and `@UnlessBuildProfile` on s
[[enable_build_properties]]
=== Enabling Beans for Quarkus Build Properties
-Quarkus adds a capability that CDI currently does not support which is to conditionally enable a bean when a Quarkus build time property has/has not a specific value,
+Quarkus adds a capability that CDI currently does not support which is to conditionally enable a bean when a Quarkus build time property has or does not have a specific value,
via the `@io.quarkus.arc.properties.IfBuildProperty` and `@io.quarkus.arc.properties.UnlessBuildProperty` annotations.
-When used in conjunction with `@io.quarkus.arc.DefaultBean`, this annotation allow for the creation of different bean configurations for different build properties.
+When used in conjunction with `@io.quarkus.arc.DefaultBean`, these annotations allow for the creation of different bean configurations for different build properties.
The scenario we mentioned above with `Tracer` could also be implemented in the following way:
diff --git a/docs/src/main/asciidoc/centralized-log-management.adoc b/docs/src/main/asciidoc/centralized-log-management.adoc
index f0127b3d339ce..6885fd2f5e7fd 100644
--- a/docs/src/main/asciidoc/centralized-log-management.adoc
+++ b/docs/src/main/asciidoc/centralized-log-management.adoc
@@ -6,18 +6,15 @@ https://github.com/quarkusio/quarkus/tree/main/docs/src/main/asciidoc
= Centralized log management (Graylog, Logstash, Fluentd)
include::_attributes.adoc[]
:categories: observability
-:summary: This guide explains how to centralize your logs with Logstash or Fluentd using the Graylog Extended Log Format (GELF).
+:summary: This guide explains how to centralize your logs with Graylog, Logstash or Fluentd.
:topics: observability,logging
-:extensions: io.quarkus:quarkus-logging-gelf
+:extensions: io.quarkus:quarkus-logging-gelf,io.quarkus:quarkus-opentelemetry
This guide explains how you can send your logs to a centralized log management system like Graylog, Logstash (inside the Elastic Stack or ELK - Elasticsearch, Logstash, Kibana) or
Fluentd (inside EFK - Elasticsearch, Fluentd, Kibana).
-There are a lot of different ways to centralize your logs (if you are using Kubernetes, the simplest way is to log to the console and ask you cluster administrator to integrate a central log manager inside your cluster).
-In this guide, we will expose how to send them to an external tool using the `quarkus-logging-gelf` extension that can use TCP or UDP to send logs in the Graylog Extended Log Format (GELF).
-
-The `quarkus-logging-gelf` extension will add a GELF log handler to the underlying logging backend that Quarkus uses (jboss-logmanager).
-By default, it is disabled, if you enable it but still use another handler (by default the console handler is enabled), your logs will be sent to both handlers.
+There are a lot of different ways to centralize your logs (if you are using Kubernetes, the simplest way is to log to the console and ask your cluster administrator to integrate a central log manager inside your cluster).
+In this guide, we will expose how to send them to an external tool using supported Quarkus extensions in supported standard formats like Graylog Extended Log Format (GELF), Elastic Common Schema (ECS) or the OpenTelemetry Log signal.
== Prerequisites
@@ -28,35 +25,12 @@ include::{includes}/prerequisites.adoc[]
The following examples will all be based on the same example application that you can create with the following steps.
-Create an application with the `quarkus-logging-gelf` extension. You can use the following command to create it:
+Create an application with the REST extension. You can use the following command to create it:
-:create-app-artifact-id: gelf-logging
-:create-app-extensions: rest,logging-gelf
+:create-app-artifact-id: centralized-logging
+:create-app-extensions: rest
include::{includes}/devtools/create-app.adoc[]
-If you already have your Quarkus project configured, you can add the `logging-gelf` extension
-to your project by running the following command in your project base directory:
-
-:add-extension-extensions: logging-gelf
-include::{includes}/devtools/extension-add.adoc[]
-
-This will add the following dependency to your build file:
-
-[source,xml,role="primary asciidoc-tabs-target-sync-cli asciidoc-tabs-target-sync-maven"]
-.pom.xml
-----
-
- io.quarkus
- quarkus-logging-gelf
-
-----
-
-[source,gradle,role="secondary asciidoc-tabs-target-sync-gradle"]
-.build.gradle
-----
-implementation("io.quarkus:quarkus-logging-gelf")
-----
-
For demonstration purposes, we create an endpoint that does nothing but log a sentence. You don't need to do this inside your application.
[source,java]
@@ -67,10 +41,10 @@ import jakarta.ws.rs.Path;
import org.jboss.logging.Logger;
-@Path("/gelf-logging")
+@Path("/logging")
@ApplicationScoped
-public class GelfLoggingResource {
- private static final Logger LOG = Logger.getLogger(GelfLoggingResource.class);
+public class LoggingResource {
+ private static final Logger LOG = Logger.getLogger(LoggingResource.class);
@GET
public void log() {
@@ -80,27 +54,85 @@ public class GelfLoggingResource {
}
----
-Configure the GELF log handler to send logs to an external UDP endpoint on the port 12201:
+== Send logs to the Elastic Stack (ELK) in the ECS (Elastic Common Schema) format with the Socket handler
-[source,properties]
+You can send your logs to Logstash using a TCP input in the https://www.elastic.co/guide/en/ecs-logging/overview/current/intro.html[ECS] format.
+To achieve this, we will use the `quarkus-logging-json` extension to format the logs in JSON format and the socket handler to send them to Logstash.
+
+Create the following file in `$HOME/pipelines/ecs.conf`:
+
+[source]
----
-quarkus.log.handler.gelf.enabled=true
-quarkus.log.handler.gelf.host=localhost
-quarkus.log.handler.gelf.port=12201
+input {
+ tcp {
+ port => 4560
+ codec => json
+ }
+}
+
+filter {
+ if ![span][id] and [mdc][spanId] {
+ mutate { rename => { "[mdc][spanId]" => "[span][id]" } }
+ }
+ if ![trace][id] and [mdc][traceId] {
+ mutate { rename => {"[mdc][traceId]" => "[trace][id]"} }
+ }
+}
+
+output {
+ stdout {}
+ elasticsearch {
+ hosts => ["http://elasticsearch:9200"]
+ }
+}
----
-== Send logs to Graylog
+Then configure your application to log in JSON format
-To send logs to Graylog, you first need to launch the components that compose the Graylog stack:
+[source,xml,role="primary asciidoc-tabs-target-sync-cli asciidoc-tabs-target-sync-maven"]
+.pom.xml
+----
+
+ io.quarkus
+ quarkus-logging-json
+
+----
+
+[source,gradle,role="secondary asciidoc-tabs-target-sync-gradle"]
+.build.gradle
+----
+implementation("io.quarkus:quarkus-logging-json")
+----
+
+and specify the host and port of your Logstash endpoint. To be ECS compliant, specify the log format.
+
+[source, properties]
+----
+# to keep the logs in the usual format in the console
+quarkus.log.console.json=false
+
+quarkus.log.socket.enable=true
+quarkus.log.socket.json=true
+quarkus.log.socket.endpoint=localhost:4560
+
+# to have the exception serialized into a single text element
+quarkus.log.socket.json.exception-output-type=formatted
+
+# specify the format of the produced JSON log
+quarkus.log.socket.json.log-format=ECS
+----
+
+Finally, launch the components that compose the Elastic Stack:
-- MongoDB
- Elasticsearch
-- Graylog
+- Logstash
+- Kibana
You can do this via the following `docker-compose.yml` file that you can launch via `docker-compose up -d`:
[source,yaml,subs="attributes"]
----
+# Launch Elasticsearch
version: '3.2'
services:
@@ -108,87 +140,90 @@ services:
image: {elasticsearch-image}
ports:
- "9200:9200"
+ - "9300:9300"
environment:
ES_JAVA_OPTS: "-Xms512m -Xmx512m"
discovery.type: "single-node"
cluster.routing.allocation.disk.threshold_enabled: false
networks:
- - graylog
+ - elk
- mongo:
- image: mongo:4.0
+ logstash:
+ image: {logstash-image}
+ volumes:
+ - source: $HOME/pipelines
+ target: /usr/share/logstash/pipeline
+ type: bind
+ ports:
+ - "12201:12201/udp"
+ - "5000:5000"
+ - "9600:9600"
networks:
- - graylog
+ - elk
+ depends_on:
+ - elasticsearch
- graylog:
- image: graylog/graylog:4.3.0
+ kibana:
+ image: {kibana-image}
ports:
- - "9000:9000"
- - "12201:12201/udp"
- - "1514:1514"
- environment:
- GRAYLOG_HTTP_EXTERNAL_URI: "http://127.0.0.1:9000/"
- # CHANGE ME (must be at least 16 characters)!
- GRAYLOG_PASSWORD_SECRET: "forpasswordencryption"
- # Password: admin
- GRAYLOG_ROOT_PASSWORD_SHA2: "8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918"
+ - "5601:5601"
networks:
- - graylog
+ - elk
depends_on:
- elasticsearch
- - mongo
networks:
- graylog:
+ elk:
driver: bridge
+
----
-Then, you need to create a UDP input in Graylog.
-You can do it from the Graylog web console (System -> Input -> Select GELF UDP) available at http://localhost:9000 or via the API.
+Launch your application, you should see your logs arriving inside the Elastic Stack; you can use Kibana available at http://localhost:5601/ to access them.
-This curl example will create a new Input of type GELF UDP, it uses the default login from Graylog (admin/admin).
+== Send logs to Fluentd with the Syslog handler
-[source,bash]
-----
-curl -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -H "X-Requested-By: curl" -X POST -v -d \
-'{"title":"udp input","configuration":{"recv_buffer_size":262144,"bind_address":"0.0.0.0","port":12201,"decompress_size_limit":8388608},"type":"org.graylog2.inputs.gelf.udp.GELFUDPInput","global":true}' \
-http://localhost:9000/api/system/inputs
-----
-
-Launch your application, you should see your logs arriving inside Graylog.
+You can send your logs to Fluentd using a Syslog input.
+As opposed to the GELF input, the Syslog input will not render multiline logs in one event.
-== Send logs to Logstash / the Elastic Stack (ELK)
+First, you need to create a Fluentd image with the Elasticsearch plugin.
+You can use the following Dockerfile that should be created inside a `fluentd` directory.
-Logstash comes by default with an Input plugin that can understand the GELF format, we will first create a pipeline that enables this plugin.
+[source,dockerfile]
+----
+FROM fluent/fluentd:v1.3-debian
+RUN ["gem", "install", "fluent-plugin-elasticsearch", "--version", "3.7.0"]
+----
-Create the following file in `$HOME/pipelines/gelf.conf`:
+Then, you need to create a Fluentd configuration file inside `$HOME/fluentd/fluent.conf`
[source]
----
-input {
- gelf {
- port => 12201
- }
-}
-output {
- stdout {}
- elasticsearch {
- hosts => ["http://elasticsearch:9200"]
- }
-}
+
+ @type syslog
+ port 5140
+ bind 0.0.0.0
+ message_format rfc5424
+ tag system
+
+
+
+ @type elasticsearch
+ host elasticsearch
+ port 9200
+ logstash_format true
+
----
-Finally, launch the components that compose the Elastic Stack:
+Then, launch the components that compose the EFK Stack:
- Elasticsearch
-- Logstash
+- Fluentd
- Kibana
You can do this via the following `docker-compose.yml` file that you can launch via `docker-compose up -d`:
[source,yaml,subs="attributes"]
----
-# Launch Elasticsearch
version: '3.2'
services:
@@ -202,20 +237,18 @@ services:
discovery.type: "single-node"
cluster.routing.allocation.disk.threshold_enabled: false
networks:
- - elk
+ - efk
- logstash:
- image: {logstash-image}
+ fluentd:
+ build: fluentd
+ ports:
+ - "5140:5140/udp"
volumes:
- - source: $HOME/pipelines
- target: /usr/share/logstash/pipeline
+ - source: $HOME/fluentd
+ target: /fluentd/etc
type: bind
- ports:
- - "12201:12201/udp"
- - "5000:5000"
- - "9600:9600"
networks:
- - elk
+ - efk
depends_on:
- elasticsearch
@@ -224,132 +257,174 @@ services:
ports:
- "5601:5601"
networks:
- - elk
+ - efk
depends_on:
- elasticsearch
networks:
- elk:
+ efk:
driver: bridge
+----
+
+Finally, configure your application to send logs to EFK using Syslog:
+[source,properties]
+----
+quarkus.log.syslog.enable=true
+quarkus.log.syslog.endpoint=localhost:5140
+quarkus.log.syslog.protocol=udp
+quarkus.log.syslog.app-name=quarkus
+quarkus.log.syslog.hostname=quarkus-test
----
-Launch your application, you should see your logs arriving inside the Elastic Stack; you can use Kibana available at http://localhost:5601/ to access them.
+Launch your application, you should see your logs arriving inside EFK: you can use Kibana available at http://localhost:5601/ to access them.
+== Send logs with OpenTelemetry Logging
-[[logstash_ecs]]
-== GELF alternative: Send logs to Logstash in the ECS (Elastic Common Schema) format
+OpenTelemetry Logging is able to send logs to a compatible OpenTelemetry collector. Its usage is described in the guide xref:opentelemetry-logging.adoc[Using OpenTelemetry Logging].
-You can also send your logs to Logstash using a TCP input in the https://www.elastic.co/guide/en/ecs-logging/overview/current/intro.html[ECS] format.
-To achieve this we will use the `quarkus-logging-json` extension to format the logs in JSON format and the socket handler to send them to Logstash.
+== Send logs with the `logging-gelf` extension
-For this you can use the same `docker-compose.yml` file as above but with a different Logstash pipeline configuration.
+WARNING: This extension is deprecated; we advise considering the alternatives described above in this guide.
-[source]
-----
-input {
- tcp {
- port => 4560
- codec => json
- }
-}
+The `quarkus-logging-gelf` extension will add a GELF log handler to the underlying logging backend that Quarkus uses (jboss-logmanager).
+By default, it is disabled, if you enable it but still use another handler (by default the console handler is enabled), your logs will be sent to both handlers.
-filter {
- if ![span][id] and [mdc][spanId] {
- mutate { rename => { "[mdc][spanId]" => "[span][id]" } }
- }
- if ![trace][id] and [mdc][traceId] {
- mutate { rename => {"[mdc][traceId]" => "[trace][id]"} }
- }
-}
+You can add the `logging-gelf` extension to your project by running the following command in your project base directory:
-output {
- stdout {}
- elasticsearch {
- hosts => ["http://elasticsearch:9200"]
- }
-}
-----
+:add-extension-extensions: logging-gelf
+include::{includes}/devtools/extension-add.adoc[]
-Then configure your application to log in JSON format instead of GELF
+This will add the following dependency to your build file:
[source,xml,role="primary asciidoc-tabs-target-sync-cli asciidoc-tabs-target-sync-maven"]
.pom.xml
----
io.quarkus
- quarkus-logging-json
+ quarkus-logging-gelf
----
[source,gradle,role="secondary asciidoc-tabs-target-sync-gradle"]
.build.gradle
----
-implementation("io.quarkus:quarkus-logging-json")
+implementation("io.quarkus:quarkus-logging-gelf")
----
-and specify the host and port of your Logstash endpoint. To be ECS compliant, specify the log format.
+Configure the GELF log handler to send logs to an external UDP endpoint on port 12201:
-[source, properties]
+[source,properties]
+----
+quarkus.log.handler.gelf.enabled=true
+quarkus.log.handler.gelf.host=localhost
+quarkus.log.handler.gelf.port=12201
----
-# to keep the logs in the usual format in the console
-quarkus.log.console.json=false
-quarkus.log.socket.enable=true
-quarkus.log.socket.json=true
-quarkus.log.socket.endpoint=localhost:4560
+=== Send logs to Graylog
-# to have the exception serialized into a single text element
-quarkus.log.socket.json.exception-output-type=formatted
+NOTE: It is advised to use the Syslog handler instead.
-# specify the format of the produced JSON log
-quarkus.log.socket.json.log-format=ECS
+To send logs to Graylog, you first need to launch the components that compose the Graylog stack:
+
+- MongoDB
+- Elasticsearch
+- Graylog
+
+You can do this via the following `docker-compose.yml` file that you can launch via `docker-compose up -d`:
+
+[source,yaml,subs="attributes"]
----
+version: '3.2'
+services:
+ elasticsearch:
+ image: {elasticsearch-image}
+ ports:
+ - "9200:9200"
+ environment:
+ ES_JAVA_OPTS: "-Xms512m -Xmx512m"
+ discovery.type: "single-node"
+ cluster.routing.allocation.disk.threshold_enabled: false
+ networks:
+ - graylog
-== Send logs to Fluentd (EFK)
+ mongo:
+ image: mongo:4.0
+ networks:
+ - graylog
-First, you need to create a Fluentd image with the needed plugins: elasticsearch and input-gelf.
-You can use the following Dockerfile that should be created inside a `fluentd` directory.
+ graylog:
+ image: graylog/graylog:4.3.0
+ ports:
+ - "9000:9000"
+ - "12201:12201/udp"
+ - "1514:1514"
+ environment:
+ GRAYLOG_HTTP_EXTERNAL_URI: "http://127.0.0.1:9000/"
+ # CHANGE ME (must be at least 16 characters)!
+ GRAYLOG_PASSWORD_SECRET: "forpasswordencryption"
+ # Password: admin
+ GRAYLOG_ROOT_PASSWORD_SHA2: "8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918"
+ networks:
+ - graylog
+ depends_on:
+ - elasticsearch
+ - mongo
-[source,dockerfile]
+networks:
+ graylog:
+ driver: bridge
----
-FROM fluent/fluentd:v1.3-debian
-RUN ["gem", "install", "fluent-plugin-elasticsearch", "--version", "3.7.0"]
-RUN ["gem", "install", "fluent-plugin-input-gelf", "--version", "0.3.1"]
+
+Then, you need to create a UDP input in Graylog.
+You can do it from the Graylog web console (System -> Input -> Select GELF UDP) available at http://localhost:9000 or via the API.
+
+This curl example will create a new Input of type GELF UDP, it uses the default login from Graylog (admin/admin).
+
+[source,bash]
+----
+curl -H "Content-Type: application/json" -H "Authorization: Basic YWRtaW46YWRtaW4=" -H "X-Requested-By: curl" -X POST -v -d \
+'{"title":"udp input","configuration":{"recv_buffer_size":262144,"bind_address":"0.0.0.0","port":12201,"decompress_size_limit":8388608},"type":"org.graylog2.inputs.gelf.udp.GELFUDPInput","global":true}' \
+http://localhost:9000/api/system/inputs
----
-You can build the image or let docker-compose build it for you.
+Launch your application, you should see your logs arriving inside Graylog.
-Then you need to create a fluentd configuration file inside `$HOME/fluentd/fluent.conf`
+=== Send logs to Logstash / the Elastic Stack (ELK)
+
+NOTE: It is advised to use xref:opentelemetry-logging.adoc[OpenTelemetry Logging] or the Socket handler instead.
+
+Logstash comes by default with an Input plugin that can understand the GELF format, we will first create a pipeline that enables this plugin.
+
+Create the following file in `$HOME/pipelines/gelf.conf`:
[source]
----
-
- type gelf
- tag example.gelf
- bind 0.0.0.0
- port 12201
-
-
-
- @type elasticsearch
- host elasticsearch
- port 9200
- logstash_format true
-
+input {
+ gelf {
+ port => 12201
+ }
+}
+output {
+ stdout {}
+ elasticsearch {
+ hosts => ["http://elasticsearch:9200"]
+ }
+}
----
-Finally, launch the components that compose the EFK Stack:
+Finally, launch the components that compose the Elastic Stack:
- Elasticsearch
-- Fluentd
+- Logstash
- Kibana
You can do this via the following `docker-compose.yml` file that you can launch via `docker-compose up -d`:
[source,yaml,subs="attributes"]
----
+# Launch Elasticsearch
version: '3.2'
services:
@@ -363,18 +438,20 @@ services:
discovery.type: "single-node"
cluster.routing.allocation.disk.threshold_enabled: false
networks:
- - efk
+ - elk
- fluentd:
- build: fluentd
- ports:
- - "12201:12201/udp"
+ logstash:
+ image: {logstash-image}
volumes:
- - source: $HOME/fluentd
- target: /fluentd/etc
+ - source: $HOME/pipelines
+ target: /usr/share/logstash/pipeline
type: bind
+ ports:
+ - "12201:12201/udp"
+ - "5000:5000"
+ - "9600:9600"
networks:
- - efk
+ - elk
depends_on:
- elasticsearch
@@ -383,44 +460,46 @@ services:
ports:
- "5601:5601"
networks:
- - efk
+ - elk
depends_on:
- elasticsearch
networks:
- efk:
+ elk:
driver: bridge
+
----
-Launch your application, you should see your logs arriving inside EFK: you can use Kibana available at http://localhost:5601/ to access them.
+Launch your application, you should see your logs arriving inside the Elastic Stack; you can use Kibana available at http://localhost:5601/ to access them.
-== GELF alternative: use Syslog
+=== Send logs to Fluentd (EFK)
-You can also send your logs to Fluentd using a Syslog input.
-As opposed to the GELF input, the Syslog input will not render multiline logs in one event, that's why we advise to use the GELF input that we implement in Quarkus.
+NOTE: It is advised to use xref:opentelemetry-logging.adoc[OpenTelemetry Logging] or the Socket handler instead.
-First, you need to create a Fluentd image with the elasticsearch plugin.
+First, you need to create a Fluentd image with the needed plugins: elasticsearch and input-gelf.
You can use the following Dockerfile that should be created inside a `fluentd` directory.
[source,dockerfile]
----
FROM fluent/fluentd:v1.3-debian
RUN ["gem", "install", "fluent-plugin-elasticsearch", "--version", "3.7.0"]
+RUN ["gem", "install", "fluent-plugin-input-gelf", "--version", "0.3.1"]
----
-Then, you need to create a fluentd configuration file inside `$HOME/fluentd/fluent.conf`
+You can build the image or let docker-compose build it for you.
+
+Then you need to create a fluentd configuration file inside `$HOME/fluentd/fluent.conf`
[source]
----
- @type syslog
- port 5140
+ type gelf
+ tag example.gelf
bind 0.0.0.0
- message_format rfc5424
- tag system
+ port 12201
-
+
@type elasticsearch
host elasticsearch
port 9200
@@ -428,7 +507,7 @@ Then, you need to create a fluentd configuration file inside `$HOME/fluentd/flue
----
-Then, launch the components that compose the EFK Stack:
+Finally, launch the components that compose the EFK Stack:
- Elasticsearch
- Fluentd
@@ -456,7 +535,7 @@ services:
fluentd:
build: fluentd
ports:
- - "5140:5140/udp"
+ - "12201:12201/udp"
volumes:
- source: $HOME/fluentd
target: /fluentd/etc
@@ -480,21 +559,9 @@ networks:
driver: bridge
----
-Finally, configure your application to send logs to EFK using Syslog:
-
-[source,properties]
-----
-quarkus.log.syslog.enable=true
-quarkus.log.syslog.endpoint=localhost:5140
-quarkus.log.syslog.protocol=udp
-quarkus.log.syslog.app-name=quarkus
-quarkus.log.syslog.hostname=quarkus-test
-----
-
Launch your application, you should see your logs arriving inside EFK: you can use Kibana available at http://localhost:5601/ to access them.
-
-== Elasticsearch indexing consideration
+=== Elasticsearch indexing consideration
Be careful that, by default, Elasticsearch will automatically map unknown fields (if not disabled in the index settings) by detecting their type.
This can become tricky if you use log parameters (which are included by default), or if you enable MDC inclusion (disabled by default),
@@ -518,11 +585,11 @@ or you can configure your Elasticsearch index to store those fields as text or k
See the following documentation for Graylog (but the same issue exists for the other central logging stacks): link:https://docs.graylog.org/en/3.2/pages/configuration/elasticsearch.html#custom-index-mappings[Custom Index Mappings].
[[configuration-reference]]
-== Configuration Reference
+=== Configuration Reference
Configuration is done through the usual `application.properties` file.
include::{generated-dir}/config/quarkus-logging-gelf.adoc[opts=optional, leveloffset=+1]
This extension uses the `logstash-gelf` library that allow more configuration options via system properties,
-you can access its documentation here: https://logging.paluch.biz/ .
+you can access its documentation here: https://logging.paluch.biz/.
diff --git a/docs/src/main/asciidoc/container-image.adoc b/docs/src/main/asciidoc/container-image.adoc
index 8fd7785a8e19b..6a2cbb17e4e97 100644
--- a/docs/src/main/asciidoc/container-image.adoc
+++ b/docs/src/main/asciidoc/container-image.adoc
@@ -48,7 +48,7 @@ For example, the presence of `src/main/jib/foo/bar` would result in `/foo/bar`
There are cases where the built container image may need to have Java debugging conditionally enabled at runtime.
-When the base image has not been changed (and therefore `ubi8/openjdk-11-runtime`, `ubi8/openjdk-17-runtime`, or `ubi8/openjdk-21-runtime` is used), then the `quarkus.jib.jvm-additional-arguments` configuration property can be used in order to
+When the base image has not been changed (and therefore `ubi9/openjdk-17-runtime`, or `ubi9/openjdk-21-runtime` is used), then the `quarkus.jib.jvm-additional-arguments` configuration property can be used in order to
make the JVM listen on the debug port at startup.
The exact configuration is:
@@ -64,7 +64,7 @@ Other base images might provide launch scripts that enable debugging when an env
The `quarkus.jib.jvm-entrypoint` configuration property can be used to completely override the container entry point and can thus be used to either hard code the JVM debug configuration or point to a script that handles the details.
-For example, if the base images `ubi8/openjdk-11-runtime`, `ubi8/openjdk-17-runtime` or `ubi8/openjdk-21-runtime` are used to build the container, the entry point can be hard-coded on the application properties file.
+For example, if the base images `ubi9/openjdk-17-runtime` or `ubi9/openjdk-21-runtime` are used to build the container, the entry point can be hard-coded on the application properties file.
.Example application.properties
[source,properties]
@@ -89,7 +89,7 @@ java \
-jar quarkus-run.jar
----
-NOTE: `/home/jboss` is the WORKDIR for all quarkus binaries in the base images `ubi8/openjdk-11-runtime`, `ubi8/openjdk-17-runtime` and `ubi8/openjdk-21-runtime` (https://catalog.redhat.com/software/containers/ubi8/openjdk-17/618bdbf34ae3739687568813?container-tabs=dockerfile[Dockerfile for ubi8/openjdk-17-runtime, window="_blank"])
+NOTE: `/home/jboss` is the WORKDIR for all quarkus binaries in the base images `ubi9/openjdk-17-runtime` and `ubi9/openjdk-21-runtime` (https://catalog.redhat.com/software/containers/ubi9/openjdk-21-runtime/6501ce769a0d86945c422d5f?container-tabs=dockerfile[Dockerfile for ubi9/openjdk-21-runtime, window="_blank"])
==== Multi-module projects and layering
diff --git a/docs/src/main/asciidoc/databases-dev-services.adoc b/docs/src/main/asciidoc/databases-dev-services.adoc
index 5d371c224cdcf..819a98d4c8eaf 100644
--- a/docs/src/main/asciidoc/databases-dev-services.adoc
+++ b/docs/src/main/asciidoc/databases-dev-services.adoc
@@ -212,9 +212,9 @@ Login credentials are the same for most databases, except when the database requ
|Database |Username |Password |Database name
|PostgreSQL, MariaDB, MySQL, IBM Db2, H2
-|`quarkus` for the default datasource or name of the datasource
|`quarkus`
|`quarkus`
+|`quarkus` for the default datasource or name of the datasource
|Microsoft SQL Server
|`SA`
diff --git a/docs/src/main/asciidoc/gradle-tooling.adoc b/docs/src/main/asciidoc/gradle-tooling.adoc
index 89251c264e359..276f6498fa2ad 100644
--- a/docs/src/main/asciidoc/gradle-tooling.adoc
+++ b/docs/src/main/asciidoc/gradle-tooling.adoc
@@ -518,13 +518,13 @@ Configuring the `quarkusBuild` task can be done as following:
quarkusBuild {
nativeArgs {
containerBuild = true <1>
- builderImage = "quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor}" <2>
+ builderImage = "quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor}" <2>
}
}
----
<1> Set `quarkus.native.container-build` property to `true`
-<2> Set `quarkus.native.builder-image` property to `quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor}`
+<2> Set `quarkus.native.builder-image` property to `quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor}`
****
[role="secondary asciidoc-tabs-sync-kotlin"]
@@ -535,13 +535,13 @@ quarkusBuild {
tasks.quarkusBuild {
nativeArgs {
"container-build" to true <1>
- "builder-image" to "quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor}" <2>
+ "builder-image" to "quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor}" <2>
}
}
----
<1> Set `quarkus.native.container-build` property to `true`
-<2> Set `quarkus.native.builder-image` property to `quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor}`
+<2> Set `quarkus.native.builder-image` property to `quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor}`
****
[WARNING]
@@ -564,13 +564,15 @@ Note that in this case the build itself runs in a Docker container too, so you d
[TIP]
====
-By default, the native executable will be generated using the `quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor}` Docker image.
+By default, the native executable will be generated using the `quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor}` Docker image.
If you want to build a native executable with a different Docker image (for instance to use a different GraalVM version),
use the `-Dquarkus.native.builder-image=` build argument.
-The list of the available Docker images can be found on https://quay.io/repository/quarkus/ubi-quarkus-mandrel-builder-image?tab=tags[quay.io].
+The list of the available Docker images can be found on https://quay.io/repository/quarkus/ubi9-quarkus-mandrel-builder-image?tab=tags[quay.io].
Be aware that a given Quarkus version might not be compatible with all the images available.
+
+Note also that starting Quarkus 3.19, the default _builder_ images are based on UBI 9. To use the previous UBI 8 based images, you can pick an image from the https://quay.io/repository/quarkus/ubi-quarkus-mandrel-builder-image?tab=tags[quay.io repository].
====
== Running native tests
diff --git a/docs/src/main/asciidoc/grpc-service-consumption.adoc b/docs/src/main/asciidoc/grpc-service-consumption.adoc
index 177d8c14a5db5..f15213bc7a645 100644
--- a/docs/src/main/asciidoc/grpc-service-consumption.adoc
+++ b/docs/src/main/asciidoc/grpc-service-consumption.adoc
@@ -204,8 +204,12 @@ public class StreamingEndpoint {
For each gRPC service you inject in your application, you can configure the following attributes:
+=== Global configuration
include::{generated-dir}/config/quarkus-grpc_quarkus.grpc-client.adoc[opts=optional, leveloffset=+1]
+=== Per-client configuration
+include::{generated-dir}/config/quarkus-grpc_quarkus.grpc.clients.adoc[opts=optional, leveloffset=+1]
+
The `client-name` is the name set in the `@GrpcClient` or derived from the injection point if not explicitly defined.
The following examples uses _hello_ as the client name.
@@ -213,6 +217,50 @@ Don't forget to replace it with the name you used in the `@GrpcClient` annotatio
IMPORTANT: When you enable `quarkus.grpc.clients."client-name".xds.enabled`, it's the xDS that should handle most of the configuration above.
+=== Custom Channel building
+
+When Quarkus builds a gRPC Channel instance (the way gRPC clients communicate with gRPC services on a lower network level), users can apply their own Channel(Builder) customizers. The customizers are applied in order of `priority`; the higher the number, the later the customizer is applied. The customizers are applied before Quarkus applies the user's client configuration, which makes them ideal for setting initial defaults for all clients.
+
+There are two `customize` methods: the first one uses gRPC's `ManagedChannelBuilder` as a parameter - to be used with Quarkus' legacy gRPC support, while the other uses `GrpcClientOptions` - to be used with the new Vert.x gRPC support. Users should implement the `customize` method matching the gRPC support type in use, or both if the customizer is neutral to the gRPC support type.
+
+[source, java]
+----
+public interface ChannelBuilderCustomizer<T extends ManagedChannelBuilder<T>> {
+
+ /**
+ * Customize a ManagedChannelBuilder instance.
+ *
+ * @param name gRPC client name
+ * @param config client's configuration
+ * @param builder Channel builder instance
+ * @return map of config properties to be used as default service config against the builder
+ */
+ default Map<String, Object> customize(String name, GrpcClientConfiguration config, T builder) {
+ return Map.of();
+ }
+
+ /**
+ * Customize a GrpcClientOptions instance.
+ *
+ * @param name gRPC client name
+ * @param config client's configuration
+ * @param options GrpcClientOptions instance
+ */
+ default void customize(String name, GrpcClientConfiguration config, GrpcClientOptions options) {
+ }
+
+ /**
+ * Priority by which the customizers are applied.
+ * Higher priority is applied later.
+ *
+ * @return the priority
+ */
+ default int priority() {
+ return 0;
+ }
+}
+----
+
=== Enabling TLS
To enable TLS, use the following configuration.
diff --git a/docs/src/main/asciidoc/hibernate-orm.adoc b/docs/src/main/asciidoc/hibernate-orm.adoc
index e369e8ca015c2..751c6dd823cf1 100644
--- a/docs/src/main/asciidoc/hibernate-orm.adoc
+++ b/docs/src/main/asciidoc/hibernate-orm.adoc
@@ -1082,7 +1082,7 @@ Jump over to xref:datasource.adoc[Quarkus - Datasources] for all details.
Quarkus currently supports the link:{hibernate-orm-docs-url}#multitenacy-separate-database[separate database] approach, the link:{hibernate-orm-docs-url}#multitenacy-separate-schema[separate schema] approach and the link:{hibernate-orm-docs-url}#multitenacy-discriminator[discriminator] approach.
-To see multitenancy in action, you can check out the link:{quickstarts-tree-url}/hibernate-orm-multi-tenancy-quickstart[hibernate-orm-multi-tenancy-quickstart] quickstart.
+To see multitenancy in action, you can check out the link:{quickstarts-tree-url}/hibernate-orm-multi-tenancy-schema-quickstart[hibernate-orm-multi-tenancy-schema-quickstart] or link:{quickstarts-tree-url}/hibernate-orm-multi-tenancy-database-quickstart[hibernate-orm-multi-tenancy-database-quickstart].
=== Writing the application
@@ -1210,7 +1210,7 @@ quarkus.hibernate-orm.multitenant=SCHEMA <2>
quarkus.datasource.db-kind=postgresql <3>
quarkus.datasource.username=quarkus_test
quarkus.datasource.password=quarkus_test
-quarkus.datasource.jdbc.url=jdbc:postgresql://localhost:5432/quarkus_test
+%prod.quarkus.datasource.jdbc.url=jdbc:postgresql://localhost:5432/quarkus_test
quarkus.flyway.schemas=base,mycompany <4>
quarkus.flyway.locations=classpath:schema
@@ -1278,7 +1278,7 @@ quarkus.hibernate-orm.datasource=base <3>
quarkus.datasource.base.db-kind=postgresql <4>
quarkus.datasource.base.username=quarkus_test
quarkus.datasource.base.password=quarkus_test
-quarkus.datasource.base.jdbc.url=jdbc:postgresql://localhost:5432/quarkus_test
+%prod.quarkus.datasource.base.jdbc.url=jdbc:postgresql://localhost:5432/quarkus_test
quarkus.flyway.base.locations=classpath:database/base <5>
quarkus.flyway.base.migrate-at-start=true
@@ -1286,7 +1286,7 @@ quarkus.flyway.base.migrate-at-start=true
quarkus.datasource.mycompany.db-kind=postgresql <6>
quarkus.datasource.mycompany.username=mycompany
quarkus.datasource.mycompany.password=mycompany
-quarkus.datasource.mycompany.jdbc.url=jdbc:postgresql://localhost:5433/mycompany
+%prod.quarkus.datasource.mycompany.jdbc.url=jdbc:postgresql://localhost:5433/mycompany
quarkus.flyway.mycompany.locations=classpath:database/mycompany <7>
quarkus.flyway.mycompany.migrate-at-start=true
----
diff --git a/docs/src/main/asciidoc/images/observability-grafana-dashboards.png b/docs/src/main/asciidoc/images/observability-grafana-dashboards.png
new file mode 100644
index 0000000000000..76039ff24ee13
Binary files /dev/null and b/docs/src/main/asciidoc/images/observability-grafana-dashboards.png differ
diff --git a/docs/src/main/asciidoc/images/observability-grafana-loki.png b/docs/src/main/asciidoc/images/observability-grafana-loki.png
new file mode 100644
index 0000000000000..1e33c42e31e89
Binary files /dev/null and b/docs/src/main/asciidoc/images/observability-grafana-loki.png differ
diff --git a/docs/src/main/asciidoc/images/observability-grafana-tempo.png b/docs/src/main/asciidoc/images/observability-grafana-tempo.png
new file mode 100644
index 0000000000000..09a180f66c326
Binary files /dev/null and b/docs/src/main/asciidoc/images/observability-grafana-tempo.png differ
diff --git a/docs/src/main/asciidoc/infinispan-client-reference.adoc b/docs/src/main/asciidoc/infinispan-client-reference.adoc
index 7084c2e4ae3d9..77fdc84bccdf2 100644
--- a/docs/src/main/asciidoc/infinispan-client-reference.adoc
+++ b/docs/src/main/asciidoc/infinispan-client-reference.adoc
@@ -299,6 +299,7 @@ distributedCache:
You can use the following authentication mechanisms with the Infinispan client:
+* DIGEST-SHA-512 (default)
* DIGEST-MD5
* PLAIN (recommended only in combination with TLS encryption)
* EXTERNAL
diff --git a/docs/src/main/asciidoc/kafka.adoc b/docs/src/main/asciidoc/kafka.adoc
index ac01aac591db6..147f0b6dc30e7 100644
--- a/docs/src/main/asciidoc/kafka.adoc
+++ b/docs/src/main/asciidoc/kafka.adoc
@@ -2956,9 +2956,7 @@ NOTE: If you use Hibernate Reactive, look at < emitter;
+ @Channel("kafka") MutinyEmitter<FruitDto> emitter;
@POST
@Path("/fruits")
- @Transactional // <1>
- public CompletionStage storeAndSendToKafka(Fruit fruit) { // <2>
+ @Transactional // <1>
+ public void storeAndSendToKafka(Fruit fruit) { // <2>
fruit.persist();
- return emitter.send(new FruitDto(fruit)); // <3>
+ emitter.sendAndAwait(new FruitDto(fruit)); // <3>
}
}
----
<1> As we are writing to the database, make sure we run inside a transaction
-<2> The method receives the fruit instance to persist. It returns a `CompletionStage` which is used for the transaction demarcation. The transaction is committed when the return `CompletionStage` completes. In our case, it's when the message is written to Kafka.
+<2> The method receives the fruit instance to persist.
<3> Wrap the managed entity inside a Data transfer object and send it to Kafka.
This makes sure that managed entity is not impacted by the Kafka serialization.
+Then await the completion of the operation before returning.
+
+NOTE: You should not return a `CompletionStage` or `Uni` when using `@Transactional`, as all transaction commits will happen on a single thread, which impacts performance.
[[writing-entities-managed-by-hibernate-reactive-to-kafka]]
=== Writing entities managed by Hibernate Reactive to Kafka
@@ -3191,23 +3192,104 @@ public class FruitProducer {
@Consumes(MediaType.APPLICATION_JSON)
@Bulkhead(1)
 public Uni<Void> post(Fruit fruit) {
- Context context = Vertx.currentContext(); // <2>
- return sf.withTransaction(session -> // <3>
- kafkaTx.withTransaction(emitter -> // <4>
- session.persist(fruit).invoke(() -> emitter.send(fruit)) // <5>
- ).emitOn(context::runOnContext) // <6>
- );
+ return sf.withTransaction(session -> // <2>
+ kafkaTx.withTransaction(emitter -> // <3>
+ session.persist(fruit).invoke(() -> emitter.send(fruit)) // <4>
+ ));
}
}
----
<1> Inject the Hibernate Reactive `SessionFactory`.
-<2> Capture the caller Vert.x context.
-<3> Begin a Hibernate Reactive transaction.
-<4> Begin a Kafka transaction.
-<5> Persist the payload and send the entity to Kafka.
-<6> The Kafka transaction terminates on the Kafka producer sender thread.
-We need to switch to the Vert.x context previously captured in order to terminate the Hibernate Reactive transaction on the same context we started it.
+<2> Begin a Hibernate Reactive transaction.
+<3> Begin a Kafka transaction.
+<4> Persist the payload and send the entity to Kafka.
+
+Alternatively, you can use the `@WithTransaction` annotation to start a transaction and commit it when the method returns:
+
+[source, java]
+----
+import jakarta.inject.Inject;
+import jakarta.ws.rs.Consumes;
+import jakarta.ws.rs.POST;
+import jakarta.ws.rs.Path;
+import jakarta.ws.rs.core.MediaType;
+
+import org.eclipse.microprofile.faulttolerance.Bulkhead;
+import org.eclipse.microprofile.reactive.messaging.Channel;
+
+import io.quarkus.hibernate.reactive.panache.common.WithTransaction;
+import io.smallrye.mutiny.Uni;
+import io.smallrye.reactive.messaging.kafka.transactions.KafkaTransactions;
+
+@Path("/")
+public class FruitProducer {
+
+ @Channel("kafka") KafkaTransactions<Fruit> kafkaTx;
+
+ @POST
+ @Path("/fruits")
+ @Consumes(MediaType.APPLICATION_JSON)
+ @Bulkhead(1)
+ @WithTransaction // <1>
+ public Uni<Void> post(Fruit fruit) {
+ return kafkaTx.withTransaction(emitter -> // <2>
+ fruit.persist().invoke(() -> emitter.send(fruit)) // <3>
+ );
+ }
+}
+----
+
+<1> Start a Hibernate Reactive transaction and commit it when the method returns.
+<2> Begin a Kafka transaction.
+<3> Persist the payload and send the entity to Kafka.
+
+[[chaining-kafka-transactions-with-hibernate-orm-transactions]]
+=== Chaining Kafka Transactions with Hibernate ORM transactions
+
+While `KafkaTransactions` provide a reactive API on top of Mutiny to manage Kafka transactions,
+you can still chain Kafka transactions with blocking Hibernate ORM transactions.
+
+[source, java]
+----
+import jakarta.transaction.Transactional;
+import jakarta.ws.rs.POST;
+import jakarta.ws.rs.Path;
+
+import org.eclipse.microprofile.reactive.messaging.Channel;
+
+import io.quarkus.logging.Log;
+import io.smallrye.mutiny.Uni;
+import io.smallrye.reactive.messaging.kafka.transactions.KafkaTransactions;
+
+@Path("/")
+public class FruitProducer {
+
+ @Channel("kafka") KafkaTransactions<Fruit> emitter;
+
+ @POST
+ @Path("/fruits")
+ @Consumes(MediaType.APPLICATION_JSON)
+ @Bulkhead(1)
+ @Transactional // <1>
+ public void post(Fruit fruit) {
+ emitter.withTransaction(e -> { // <2>
+ // if id is attributed by the database, will need to flush to get it
+ // fruit.persistAndFlush();
+ fruit.persist(); // <3>
+ Log.infov("Persisted fruit {0}", fruit);
+ e.send(fruit); // <4>
+ return Uni.createFrom().voidItem();
+ }).await().indefinitely(); // <5>
+ }
+}
+----
+
+<1> Start a Hibernate ORM transaction. The transaction is committed when the method returns.
+<2> Begin a Kafka transaction.
+<3> Persist the payload.
+<4> Send the entity to Kafka inside the Kafka transaction.
+<5> Wait on the returned `Uni` for the Kafka transaction to complete.
== Logging
diff --git a/docs/src/main/asciidoc/logging.adoc b/docs/src/main/asciidoc/logging.adoc
index 1a50a862618b0..ca4362f0bd7c7 100644
--- a/docs/src/main/asciidoc/logging.adoc
+++ b/docs/src/main/asciidoc/logging.adoc
@@ -169,9 +169,11 @@ class SimpleBean {
<1> The fully qualified class name (FQCN) of the declaring class is used as a logger name, for example, `org.jboss.logging.Logger.getLogger(SimpleBean.class)` will be used.
<2> In this case, the name _foo_ is used as a logger name, for example, `org.jboss.logging.Logger.getLogger("foo")` will be used.
-NOTE: The logger instances are cached internally.
+[NOTE]
+====
+The logger instances are cached internally.
Therefore, when a logger is injected, for example, into a `@RequestScoped` bean, it is shared for all bean instances to avoid possible performance penalties associated with logger instantiation.
-
+====
== Use log levels
@@ -530,12 +532,9 @@ For details about its configuration, see the xref:#quarkus-core_section_quarkus-
=== Socket log handler
-This handler will send the logs to a socket.
-It is disabled by default, so you must first enable it.
-When enabled, it sends all log events to a socket, for instance to a Logstash server.
-
-This will typically be used in conjunction with the `quarkus-logging-json` extension so send logs in ECS format to an Elasticsearch instance.
-An example configuration can be found in the xref:centralized-log-management.adoc[Centralized log management] guide.
+This handler sends logs to a socket.
+Socket log handler is disabled by default; enable it to use it.
+When enabled, it sends all log events to a socket, such as a Logstash server.
* A global configuration example:
+
@@ -545,6 +544,9 @@ quarkus.log.socket.enable=true
quarkus.log.socket.endpoint=localhost:4560
----
+Typically, this handler is used with the `quarkus-logging-json` extension to send logs in ECS format to an Elasticsearch instance.
+For an example configuration, see the xref:centralized-log-management.adoc[Centralized log management] guide.
+
== Add a logging filter to your log handler
@@ -610,8 +612,11 @@ quarkus.console.color=false
quarkus.log.category."io.quarkus".level=INFO
----
-NOTE: If you add these properties in the command line, ensure `"` is escaped.
+[NOTE]
+====
+If you add these properties in the command line, ensure `"` is escaped.
For example, `-Dquarkus.log.category.\"io.quarkus\".level=DEBUG`.
+====
[[category-example]]
.File TRACE logging configuration
@@ -667,7 +672,7 @@ To send logs to a centralized tool such as Graylog, Logstash, or Fluentd, see th
=== OpenTelemetry logging
-Logging entries from all appenders can be sent using OpenTelemetry Logging.
+Logging entries from all appenders can be sent by using OpenTelemetry Logging.
For details, see the Quarkus xref:opentelemetry-logging.adoc[OpenTelemetry Logging] guide.
@@ -677,7 +682,7 @@ Enable proper logging for `@QuarkusTest` by setting the `java.util.logging.manag
The system property must be set early on to be effective, so it is recommended to configure it in the build system.
-.Setting the `java.util.logging.manager` system property in the Maven Surefire plugin configuration
+.Setting the `java.util.logging.manager` system property in the Maven Surefire plugin configuration:
[source, xml]
----
@@ -822,7 +827,7 @@ To add data to the MDC and extract it in your log output:
. Use the `MDC` class to set the data.
.. Add `import org.jboss.logmanager.MDC;`
-.. Set `MDC.put(...)` as shown in the example below:
+.. Set `MDC.put(...)` as shown in the example below:
+
[source,java]
.An example with JBoss Logging and `io.quarkus.logging.Log`
diff --git a/docs/src/main/asciidoc/maven-tooling.adoc b/docs/src/main/asciidoc/maven-tooling.adoc
index d334224fc0665..8ac4e7ddf821e 100644
--- a/docs/src/main/asciidoc/maven-tooling.adoc
+++ b/docs/src/main/asciidoc/maven-tooling.adoc
@@ -509,13 +509,18 @@ Note that in this case the build itself runs in a Docker container too, so you d
[TIP]
====
-By default, the native executable will be generated using the `quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor}` Docker image.
+By default, the native executable will be generated using the `quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor}` Docker image.
If you want to build a native executable with a different Docker image (for instance to use a different GraalVM version),
use the `-Dquarkus.native.builder-image=` build argument.
-The list of the available Docker images can be found on https://quay.io/repository/quarkus/ubi-quarkus-mandrel-builder-image?tab=tags[quay.io].
+The list of the available Docker images can be found on https://quay.io/repository/quarkus/ubi9-quarkus-mandrel-builder-image?tab=tags[quay.io].
Be aware that a given Quarkus version might not be compatible with all the images available.
+
+Starting from Quarkus 3.19, the _builder_ image is based on UBI 9, and thus requires an UBI 9 base image if you want to run the native executable in a container.
+You can switch back to UBI 8 by setting the `quarkus.native.builder-image` property to one of the available images from the https://quay.io/repository/quarkus/ubi-quarkus-mandrel-builder-image?tab=tags[quay.io repository].
+For example, `quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor}` uses UBI 8, so the resulting native executable will be compatible with UBI 8 base images.
+
====
You can follow the xref:building-native-image.adoc[Build a native executable guide] as well as xref:deploying-to-kubernetes.adoc[Deploying Application to Kubernetes and OpenShift] for more information.
@@ -1057,6 +1062,9 @@ Here is a list of system properties the Quarkus bootstrap Maven resolver checks
| `false`
| By default, the Quarkus Maven resolver is reading project's POMs directly when discovering the project's layout. While in most cases it works well enough and relatively fast, reading raw POMs has its limitation. E.g. if a POM includes modules in a profile, these modules will not be discovered. This system property enables project's layout discovery based on the effective POM models, that are properly interpolated, instead of the raw ones. The reason this option is not enabled by default is it may appear to be significantly more time-consuming that could increase, e.g. CI testing times. Until there is a better approach found that could be used by default, projects that require it should enable this option.
+| `quarkus.bootstrap.legacy-model-resolver`
+| `false`
+| This *system* or *POM* property can be used to enable the legacy `ApplicationModel` resolver implementation. The property was introduced in Quarkus 3.19.0 and will be removed once the legacy implementation is known to be not in demand.
|===
These system properties above could be added to, e.g., a `surefire` and/or `failsafe` plugin configuration as
diff --git a/docs/src/main/asciidoc/messaging.adoc b/docs/src/main/asciidoc/messaging.adoc
index e7cc35d6d2b36..478123e5f3a0a 100644
--- a/docs/src/main/asciidoc/messaging.adoc
+++ b/docs/src/main/asciidoc/messaging.adoc
@@ -356,6 +356,80 @@ public class MyProfileBean {
}
----
+==== Pausable Channels
+
+Injected `@Channel` streams are not subscribed to by default, so the flow of messages is controlled by the application code using reactive streams and Mutiny APIs.
+But for `@Incoming` methods, the flow of messages is controlled by the runtime.
+
+Pausable channels provide a mechanism to control message flow programmatically.
+This is useful in scenarios where producers or consumers need to stop temporarily due to managing the lifecycle or performing maintenance operations.
+
+To use pausable channels, you need to activate it with the configuration property `pausable` set to `true`.
+
+[source, properties]
+----
+mp.messaging.incoming.my-channel.pausable=true
+# optional, by default the channel is NOT paused initially
+mp.messaging.incoming.my-channel.initially-paused=true
+----
+
+If a channel is configured to be pausable, you can get the `PausableChannel` by channel name from the `ChannelRegistry` programmatically, and pause or resume the channel as needed:
+
+[source, java]
+----
+import jakarta.annotation.PostConstruct;
+import jakarta.enterprise.context.ApplicationScoped;
+import jakarta.inject.Inject;
+
+import org.eclipse.microprofile.reactive.messaging.Incoming;
+
+import io.smallrye.reactive.messaging.ChannelRegistry;
+import io.smallrye.reactive.messaging.PausableChannel;
+
+@ApplicationScoped
+public class PausableController {
+
+ @Inject
+ ChannelRegistry registry;
+
+ @PostConstruct
+ public void resume() {
+ // Wait for the application to be ready
+ // Retrieve the pausable channel
+ PausableChannel pausable = registry.getPausable("my-channel");
+ // Resume the processing of the messages
+ pausable.resume();
+ }
+
+ public void pause() {
+ // Retrieve the pausable channel
+ PausableChannel pausable = registry.getPausable("my-channel");
+ // Pause the processing of the messages
+ pausable.pause();
+ }
+
+ @Incoming("my-channel")
+ void process(String message) {
+ // Process the message
+ }
+
+}
+----
+
+This feature is independent of connectors and can be in theory used with channels backed by any connector.
+Note that pausing message consumption applies back-pressure on the underlying consumer which receives messages from the remote broker.
+
+[NOTE]
+====
+Kafka consumers provide a similar feature to pause and resume the consumption of messages from topic-partitions.
+The Quarkus Kafka connector allows xref:kafka.adoc#kafka-bare-clients[access to the underlying client] to pause/resume the consumption.
+
+However, by default, with the `pause-if-no-requests=true` configuration,
+the connector handles automatically the back-pressure,
+by pausing and resuming the Kafka consumer based on downstream requests.
+It is therefore recommended to use pausable channels with the default `pause-if-no-requests=true` configuration.
+====
+
==== Multiple Outgoings and `@Broadcast`
By default, messages transmitted in a channel are only dispatched to a single consumer.
diff --git a/docs/src/main/asciidoc/native-reference.adoc b/docs/src/main/asciidoc/native-reference.adoc
index 57dd02b740429..d3751eb901087 100644
--- a/docs/src/main/asciidoc/native-reference.adoc
+++ b/docs/src/main/asciidoc/native-reference.adoc
@@ -608,7 +608,7 @@ $ ./mvnw verify -DskipITs=false -Dquarkus.test.integration-test-profile=test-wit
[INFO] T E S T S
[INFO] -------------------------------------------------------
[INFO] Running org.acme.GreetingResourceIT
-2024-05-14 16:29:53,941 INFO [io.qua.tes.com.DefaultDockerContainerLauncher] (main) Executing "podman run --name quarkus-integration-test-PodgW -i --rm --user 501:20 -p 8081:8081 -p 8444:8444 --entrypoint java -v /tmp/new-project/target:/project --env QUARKUS_LOG_CATEGORY__IO_QUARKUS__LEVEL=INFO --env QUARKUS_HTTP_PORT=8081 --env QUARKUS_HTTP_SSL_PORT=8444 --env TEST_URL=http://localhost:8081 --env QUARKUS_PROFILE=test-with-native-agent --env QUARKUS_TEST_INTEGRATION_TEST_PROFILE=test-with-native-agent quay.io/quarkus/ubi-quarkus-mandrel-builder-image:jdk-21 -agentlib:native-image-agent=access-filter-file=quarkus-access-filter.json,caller-filter-file=quarkus-caller-filter.json,config-output-dir=native-image-agent-base-config, -jar quarkus-app/quarkus-run.jar"
+2024-05-14 16:29:53,941 INFO [io.qua.tes.com.DefaultDockerContainerLauncher] (main) Executing "podman run --name quarkus-integration-test-PodgW -i --rm --user 501:20 -p 8081:8081 -p 8444:8444 --entrypoint java -v /tmp/new-project/target:/project --env QUARKUS_LOG_CATEGORY__IO_QUARKUS__LEVEL=INFO --env QUARKUS_HTTP_PORT=8081 --env QUARKUS_HTTP_SSL_PORT=8444 --env TEST_URL=http://localhost:8081 --env QUARKUS_PROFILE=test-with-native-agent --env QUARKUS_TEST_INTEGRATION_TEST_PROFILE=test-with-native-agent quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:jdk-21 -agentlib:native-image-agent=access-filter-file=quarkus-access-filter.json,caller-filter-file=quarkus-caller-filter.json,config-output-dir=native-image-agent-base-config, -jar quarkus-app/quarkus-run.jar"
...
[INFO]
[INFO] --- quarkus:{quarkus-version}:native-image-agent (default) @ new-project ---
@@ -862,7 +862,7 @@ So, go ahead and add the following options to that file:
[source,properties,subs=attributes+]
----
quarkus.native.container-build=true
-quarkus.native.builder-image=quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor}
+quarkus.native.builder-image=quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor}
quarkus.container-image.build=true
quarkus.container-image.group=test
----
@@ -1282,7 +1282,7 @@ These are called expert options and you can learn more about them by running:
[source,bash,subs=attributes+]
----
-docker run quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor} --expert-options-all
+docker run quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor} --expert-options-all
----
[WARNING]
@@ -2478,7 +2478,7 @@ E.g.
[source,bash,subs=attributes+]
----
./mvnw package -DskipTests -Dnative -Dquarkus.native.container-build=true \
- -Dquarkus.native.builder-image=quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor} \
+ -Dquarkus.native.builder-image=quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor} \
-Dquarkus.native.monitoring=jfr
----
diff --git a/docs/src/main/asciidoc/observability-devservices-lgtm.adoc b/docs/src/main/asciidoc/observability-devservices-lgtm.adoc
index 5d207a4791ecc..37fc37fa7cb65 100644
--- a/docs/src/main/asciidoc/observability-devservices-lgtm.adoc
+++ b/docs/src/main/asciidoc/observability-devservices-lgtm.adoc
@@ -4,14 +4,14 @@ and pull requests should be submitted there:
https://github.com/quarkusio/quarkus/tree/main/docs/src/main/asciidoc
////
= Observability Dev Services with Grafana OTel LGTM
-
include::_attributes.adoc[]
:categories: observability,devservices,telemetry,metrics,tracing,logging, opentelemetry, micrometer, prometheus, tempo, loki, grafana
:summary: Instructions on how to use Grafana Otel LGTM
:topics: observability,grafana,lgtm,otlp,opentelemetry,devservices,micrometer
:extensions: io.quarkus:quarkus-observability-devservices
-https://github.com/grafana/docker-otel-lgtm[OTel-LGTM] is `all-in-one` Docker image containing OpenTelemetry's https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/README.md[OTLP] as the protocol to transport metrics, tracing and logging data to an https://opentelemetry.io/docs/collector[OpenTelemetry Collector] which then stores signals data into https://prometheus.io/[Prometheus] (metrics), https://github.com/grafana/tempo[Tempo] (traces) and https://github.com/grafana/loki[Loki] (logs), only to have it visualized by https://github.com/grafana/grafana[Grafana]. It's used by Quarkus Observability to provide the Grafana OTel LGTM Dev Resource.
+This Dev Service provides the https://github.com/grafana/docker-otel-lgtm[Grafana OTel-LGTM], an `all-in-one` Docker image containing an https://opentelemetry.io/docs/collector[OpenTelemetry Collector] receiving and then forwarding telemetry data to https://prometheus.io/[Prometheus] (metrics), https://github.com/grafana/tempo[Tempo] (traces) and https://github.com/grafana/loki[Loki] (logs).
+This data can then be visualized by https://github.com/grafana/grafana[Grafana].
== Configuring your project
@@ -33,9 +33,35 @@ Add the Quarkus Grafana OTel LGTM sink (where data goes) extension to your build
implementation("io.quarkus:quarkus-observability-devservices-lgtm")
----
-=== Metrics
+=== Micrometer
+
+The https://quarkus.io/guides/telemetry-micrometer[Micrometer Quarkus extension] provides metrics from automatic instrumentation implemented in Quarkus and its extensions.
+
+There are multiple ways to output Micrometer metrics. Here are some examples:
+
+==== Using the Micrometer Prometheus registry
+
+This is the most common way to output metrics from Micrometer and the default way in Quarkus. The Micrometer Prometheus registry will publish data in the `/q/metrics` endpoint and a scraper inside the Grafana LGTM Dev Service will grab it (*pull* data from the service).
+
+
+[source,xml,role="primary asciidoc-tabs-target-sync-cli asciidoc-tabs-target-sync-maven"]
+.pom.xml
+----
+
+ io.quarkiverse.micrometer.registry
+ quarkus-micrometer-registry-prometheus
+
+----
+
+[source,gradle,role="secondary asciidoc-tabs-target-sync-gradle"]
+.build.gradle
+----
+implementation("io.quarkiverse.micrometer.registry:quarkus-micrometer-registry-prometheus")
+----
+
+==== Using the Micrometer OTLP registry
-If you need metrics, add the Micrometer OTLP registry to your build file:
+The https://docs.quarkiverse.io/quarkus-micrometer-registry/dev/micrometer-registry-otlp.html[Quarkiverse Micrometer OTLP registry] will output data using the OpenTelemetry OTLP protocol to the Grafana LGTM Dev Service. This will *push* data out of the service:
[source,xml,role="primary asciidoc-tabs-target-sync-cli asciidoc-tabs-target-sync-maven"]
.pom.xml
@@ -52,11 +78,15 @@ If you need metrics, add the Micrometer OTLP registry to your build file:
implementation("io.quarkiverse.micrometer.registry:quarkus-micrometer-registry-otlp")
----
-When using the https://micrometer.io/[MicroMeter's] Quarkiverse OTLP registry to push metrics to Grafana OTel LGTM, the `quarkus.micrometer.export.otlp.url` property is automatically set to OTel collector endpoint as seen from the outside of the docker container.
+When using the https://micrometer.io/[Micrometer's] Quarkiverse OTLP registry to push metrics to Grafana OTel LGTM, the `quarkus.micrometer.export.otlp.url` property is automatically set to OTel collector endpoint as seen from the outside of the Docker container.
+
+=== OpenTelemetry
-=== Tracing
+With OpenTelemetry, metrics, traces and logs can be created and sent to the Grafana LGTM Dev Service.
-For Tracing add the `quarkus-opentelemetry` extension to your build file:
+By default, the https://quarkus.io/guides/opentelemetry[OpenTelemetry extension] will produce https://quarkus.io/guides/opentelemetry-tracing[traces]. https://quarkus.io/guides/opentelemetry-metrics[Metrics] and https://quarkus.io/guides/opentelemetry-logging[logs] must be enabled separately.
+
+The `quarkus-opentelemetry` extension can be added to your build file like this:
[source,xml,role="primary asciidoc-tabs-target-sync-cli asciidoc-tabs-target-sync-maven"]
.pom.xml
----
@@ -72,11 +102,36 @@ For Tracing add the `quarkus-opentelemetry` extension to your build file:
implementation("io.quarkus:quarkus-opentelemetry")
----
-The `quarkus.otel.exporter.otlp.endpoint` property is automatically set to OTel collector endpoint as seen from the outside of the docker container.
+The `quarkus.otel.exporter.otlp.endpoint` property is automatically set to the OTel collector endpoint as seen from the outside of the Docker container.
The `quarkus.otel.exporter.otlp.protocol` is set to `http/protobuf`.
-=== Access Grafana
+=== Micrometer to OpenTelemetry bridge
+
+This extension provides Micrometer metrics and OpenTelemetry metrics, traces and logs. All data is managed and sent out by the OpenTelemetry extension.
+
+**All signals are enabled by default.**
+
+The extension can be added to your build file like this:
+
+[source,xml,role="primary asciidoc-tabs-target-sync-cli asciidoc-tabs-target-sync-maven"]
+.pom.xml
+----
+
+ io.quarkus
+ quarkus-micrometer-opentelemetry
+
+----
+
+[source,gradle,role="secondary asciidoc-tabs-target-sync-gradle"]
+.build.gradle
+----
+implementation("io.quarkus:quarkus-micrometer-opentelemetry")
+----
+
+== Grafana
+
+=== Grafana UI access
Once you start your app in dev mode:
@@ -87,19 +142,43 @@ You will see a log entry like this:
[source, log]
----
[io.qu.ob.de.ObservabilityDevServiceProcessor] (build-35) Dev Service Lgtm started, config: {grafana.endpoint=http://localhost:42797, quarkus.otel.exporter.otlp.endpoint=http://localhost:34711, otel-collector.url=localhost:34711, quarkus.micrometer.export.otlp.url=http://localhost:34711/v1/metrics, quarkus.otel.exporter.otlp.protocol=http/protobuf}
-
----
Remember that Grafana is accessible in an ephemeral port, so you need to check the logs to see which port is being used. In this example, the grafana endpoint is `grafana.endpoint=http://localhost:42797`.
-If you miss the message you can always check the port with this Docker command:
-[source, bash]
-----
-docker ps | grep grafana
-----
+Another option is to use the *Dev UI* (http://localhost:8080/q/dev-ui/extensions) as the Grafana URL link will be available and if selected it will open a new browser tab directly to the running Grafana instance:
+
+image::dev-ui-observability-card.png[alt=Dev UI LGTM, align=center,width=50%]
+
+=== Explore
+
+In the explore section, you can query the data for all the data sources.
+
+To see traces, select the `tempo` data source and query for data:
+
+image::observability-grafana-tempo.png[alt=Dev UI LGTM, align=center,width=90%]
+
+For logs, select the `loki` data source and query for data:
+
+image::observability-grafana-loki.png[alt=Dev UI LGTM, align=center,width=90%]
+
+=== The dashboards
+
+The Dev Service includes a set of dashboards.
+
+image::observability-grafana-dashboards.png[alt=Dev UI LGTM, align=center,width=90%]
+
+Each dashboard is tuned for the specific application setup. The available dashboards are:
+
+* *Quarkus Micrometer OpenTelemetry*: to be used with the Micrometer and OpenTelemetry extension.
+* *Quarkus Micrometer OTLP registry*: to be used with the Micrometer OTLP registry extension.
+* *Quarkus Micrometer Prometheus registry*: to be used with the Micrometer Prometheus registry extension.
+* *Quarkus OpenTelemetry Logging*: to view logs coming from the OpenTelemetry extension.
-Another option is to use the Dev UI as the Grafana URL link will be available and if selected will open a new browser tab directly to the running Grafana instance:
-image::dev-ui-observability-card.png[alt=Dev UI LGTM, align=center,width=80%]
+[NOTE]
+====
+Some panels in the dashboards might take a few minutes to show accurate data when their values are calculated over a sliding time window.
+====
=== Additional configuration
diff --git a/docs/src/main/asciidoc/openapi-swaggerui.adoc b/docs/src/main/asciidoc/openapi-swaggerui.adoc
index 356bb5156445d..e69f2935bc41d 100644
--- a/docs/src/main/asciidoc/openapi-swaggerui.adoc
+++ b/docs/src/main/asciidoc/openapi-swaggerui.adoc
@@ -165,7 +165,7 @@ public class FruitResourceTest {
Quarkus provides the https://github.com/smallrye/smallrye-open-api/[SmallRye OpenAPI] extension compliant with the
https://github.com/eclipse/microprofile-open-api/[MicroProfile OpenAPI]
specification in order to generate your API
-https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.0.md[OpenAPI v3 specification].
+https://spec.openapis.org/oas/v3.1.0.html[OpenAPI v3 specification].
You just need to add the `openapi` extension to your Quarkus application:
@@ -198,7 +198,7 @@ Once your application is started, you can make a request to the default `/q/open
[source,shell]
----
$ curl http://localhost:8080/q/openapi
-openapi: 3.0.3
+openapi: 3.1.0
info:
title: Generated API
version: "1.0"
@@ -359,7 +359,7 @@ Remember that setting fields on the schema will override what has been generated
=== Runtime filters
-Runtime filters by default runs on startup (when the final OpenAPI document gets created). You can change runtime filters to run on every request, making the openapi document dynamic.
+Runtime filters by default run on startup (when the final OpenAPI document gets created). You can change runtime filters to run on every request, making the OpenAPI document dynamic.
To do this you need to set this propery: `quarkus.smallrye-openapi.always-run-filter=true`.
== Loading OpenAPI Schema From Static Files
@@ -373,7 +373,7 @@ Quarkus also supports alternative <>
[source,yaml]
----
-openapi: 3.0.1
+openapi: 3.1.0
info:
title: Static OpenAPI document of fruits resource
description: Fruit resources Open API documentation
@@ -446,16 +446,23 @@ Live reload of static OpenAPI document is supported during development. A modifi
== Changing the OpenAPI version
-By default, when the document is generated, the OpenAPI version used will be `3.0.3`. If you use a static file as mentioned above, the version in the file
+By default, when the document is generated, the OpenAPI version used will be `3.1.0`. If you use a static file as mentioned above, the version in the file
will be used. You can also define the version in SmallRye using the following configuration:
[source, properties]
----
-mp.openapi.extensions.smallrye.openapi=3.0.2
+mp.openapi.extensions.smallrye.openapi=3.0.4
----
This might be useful if your API goes through a Gateway that needs a certain version.
+[NOTE]
+====
+Changing the OpenAPI version between `3.0.x` and `3.1.x` versions will result in changes to the rendered document to satisfy the requirements
+of the chosen version. A good starting point to learn about the differences between OpenAPI 3.0 and 3.1 is the
+https://www.openapis.org/blog/2021/02/16/migrating-from-openapi-3-0-to-3-1-0[OpenAPI Initiative].
+====
+
== Auto-generation of Operation Id
The https://swagger.io/docs/specification/paths-and-operations/[Operation Id] can be set using the `@Operation` annotation, and is in many cases useful when using a tool to generate a client stub from the schema.
diff --git a/docs/src/main/asciidoc/platform.adoc b/docs/src/main/asciidoc/platform.adoc
index 6b14645a13c3e..5b40093988e0a 100644
--- a/docs/src/main/asciidoc/platform.adoc
+++ b/docs/src/main/asciidoc/platform.adoc
@@ -129,9 +129,25 @@ A platform properties file for the example above would contain:
[source,text,subs=attributes+]
----
-platform.quarkus.native.builder-image=quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor}
+platform.quarkus.native.builder-image=quay.io/quarkus/ubi9-quarkus-mandrel-builder-image:{mandrel-flavor}
----
+[IMPORTANT]
+====
+Starting with Quarkus 3.19+, the _builder_ image used to build the native executable is based on UBI 9.
+It means that the native executable produced by the container build will be based on UBI 9 as well.
+So, if you plan to build a container, make sure that the base image in your `Dockerfile` is compatible with UBI 9.
+The native executable will not run on UBI 8 base images.
+
+For example, to switch back to a UBI8 _builder image_, you can use:
+
+`platform.quarkus.native.builder-image=quay.io/quarkus/ubi-quarkus-mandrel-builder-image:{mandrel-flavor}`
+
+You can see the available tags for UBI8 https://quay.io/repository/quarkus/ubi-quarkus-mandrel-builder-image?tab=tags[here]
+and for UBI9 https://quay.io/repository/quarkus/ubi9-quarkus-mandrel-builder-image?tab=tags[here (UBI 9)].
+====
+
+
There is also a Maven plugin goal that validates the platform properties content and its artifact coordinates and also checks whether the platform properties artifact is present in the platform's BOM. Here is a sample plugin configuration:
[source,xml]
diff --git a/docs/src/main/asciidoc/quarkus-runtime-base-image.adoc b/docs/src/main/asciidoc/quarkus-runtime-base-image.adoc
index c9640697f42ac..d2ebb7802c7db 100644
--- a/docs/src/main/asciidoc/quarkus-runtime-base-image.adoc
+++ b/docs/src/main/asciidoc/quarkus-runtime-base-image.adoc
@@ -22,10 +22,9 @@ In your `Dockerfile`, just use:
[source, dockerfile]
----
-FROM quay.io/quarkus/quarkus-micro-image:2.0
+FROM quay.io/quarkus/ubi9-quarkus-micro-image:2.0
WORKDIR /work/
-COPY target/*-runner /work/application
-RUN chmod 775 /work
+COPY --chmod=0755 target/*-runner /work/application
EXPOSE 8080
CMD ["./application", "-Dquarkus.http.host=0.0.0.0"]
----
@@ -39,11 +38,11 @@ In this case, you need to use a multi-stage `dockerfile` to copy the required li
[source, dockerfile]
----
# First stage - install the dependencies in an intermediate container
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10 as BUILD
+FROM registry.access.redhat.com/ubi9/ubi-minimal:9.5 as BUILD
RUN microdnf install freetype
# Second stage - copy the dependencies
-FROM quay.io/quarkus/quarkus-micro-image:2.0
+FROM quay.io/quarkus/ubi9-quarkus-micro-image:2.0
COPY --from=BUILD \
/lib64/libfreetype.so.6 \
/lib64/libbz2.so.1 \
@@ -51,8 +50,7 @@ COPY --from=BUILD \
/lib64/
WORKDIR /work/
-COPY target/*-runner /work/application
-RUN chmod 775 /work
+COPY --chmod=0755 target/*-runner /work/application
EXPOSE 8080
CMD ["./application", "-Dquarkus.http.host=0.0.0.0"]
----
@@ -62,11 +60,11 @@ If you need to have access to the full AWT support, you need more than just `lib
[source, dockerfile]
----
# First stage - install the dependencies in an intermediate container
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10 as BUILD
+FROM registry.access.redhat.com/ubi9/ubi-minimal:9.5 as BUILD
RUN microdnf install freetype fontconfig
# Second stage - copy the dependencies
-FROM quay.io/quarkus/quarkus-micro-image:2.0
+FROM quay.io/quarkus/ubi9-quarkus-micro-image:2.0
COPY --from=BUILD \
/lib64/libfreetype.so.6 \
/lib64/libgcc_s.so.1 \
@@ -95,8 +93,7 @@ COPY --from=BUILD \
/etc/fonts /etc/fonts
WORKDIR /work/
-COPY target/*-runner /work/application
-RUN chmod 775 /work
+COPY --chmod=0755 target/*-runner /work/application
EXPOSE 8080
CMD ["./application", "-Dquarkus.http.host=0.0.0.0"]
----
@@ -104,7 +101,8 @@ CMD ["./application", "-Dquarkus.http.host=0.0.0.0"]
== Alternative - Using ubi-minimal
-If the micro image does not suit your requirements, you can use https://catalog.redhat.com/software/containers/ubi8/ubi-minimal/5c359a62bed8bd75a2c3fba8[ubi8/ubi-minimal].
+
+If the micro image does not suit your requirements, you can use https://catalog.redhat.com/software/containers/ubi9-minimal/61832888c0d15aff4912fe0d[ubi9-minimal].
It's a bigger image, but contains more utilities and is closer to a full Linux distribution.
Typically, it contains a package manager (`microdnf`), so you can install packages more easily.
@@ -112,12 +110,12 @@ To use this base image, use the following `Dockerfile`:
[source, dockerfile]
----
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10
+FROM registry.access.redhat.com/ubi9/ubi-minimal:9.5
WORKDIR /work/
RUN chown 1001 /work \
&& chmod "g+rwX" /work \
&& chown 1001:root /work
-COPY --chown=1001:root target/*-runner /work/application
+COPY --chown=1001:root --chmod=0755 target/*-runner /work/application
EXPOSE 8080
USER 1001
diff --git a/docs/src/main/asciidoc/scheduler-reference.adoc b/docs/src/main/asciidoc/scheduler-reference.adoc
index bc15e9117e74f..72531ad0abf01 100644
--- a/docs/src/main/asciidoc/scheduler-reference.adoc
+++ b/docs/src/main/asciidoc/scheduler-reference.adoc
@@ -442,6 +442,57 @@ class MyService {
NOTE: A CDI event is fired synchronously and asynchronously when the scheduler or a scheduled job is paused/resumed. The payload is `io.quarkus.scheduler.SchedulerPaused`, `io.quarkus.scheduler.SchedulerResumed`, `io.quarkus.scheduler.ScheduledJobPaused` and `io.quarkus.scheduler.ScheduledJobResumed` respectively.
+[[scheduling_long_running_tasks]]
+== Scheduling Long-Running Tasks
+
+Executing a long-running task might yield a warning message similar to the following:
+
+[source,log]
+----
+WARN [io.ver.cor.imp.BlockedThreadChecker] (vertx-blocked-thread-checker) Thread Thread[vert.x-worker-thread-1,5,main] has been blocked for 81879 ms, time limit is 60000 ms: io.vertx.core.VertxException: Thread blocked
+----
+
+This is happening because the default worker thread pool is coming from Vert.x which guards against threads being blocked for far too long.
+
+NOTE: The amount of time for which a Vert.x worker thread can be blocked is also https://quarkus.io/guides/all-config#quarkus-vertx_quarkus-vertx-max-worker-execute-time[configurable].
+
+Therefore, a proper way to execute long tasks is to offload them from the scheduled method to a custom executor service.
+Here's an example of such setup for a long-running task that we do not expect to execute often:
+
+[source,java]
+----
+@ApplicationScoped
+public class LongRunner implements Runnable {
+
+ private ExecutorService executorService;
+
+ @PostConstruct
+ void init() {
+ executorService = Executors.newThreadPerTaskExecutor(Executors.defaultThreadFactory()); <1>
+ }
+
+ @PreDestroy
+ void destroy() {
+ executorService.shutdown(); <2>
+ }
+
+
+ @Scheduled(cron = "{my.schedule}")
+ public void update() {
+ executorService.execute(this); <3>
+ }
+
+ @Override
+ public void run() { <4>
+ // perform the actual task here
+ }
+}
+----
+<1> Create a fitting executor. In this case, a new thread is created per scheduled task and stopped once the task finishes.
+<2> `@PreDestroy` callback is used to shut down the executor service.
+<3> Scheduled method only delegates the job to the custom executor - this prevents the Vert.x thread from being blocked.
+<4> The bean implements `Runnable`, a format we can directly pass to the executor service as a parameter.
+
[[programmatic_scheduling]]
== Programmatic Scheduling
diff --git a/docs/src/main/asciidoc/security-getting-started-tutorial.adoc b/docs/src/main/asciidoc/security-getting-started-tutorial.adoc
index 72fee1f4e9401..d4d214b6fa9fb 100644
--- a/docs/src/main/asciidoc/security-getting-started-tutorial.adoc
+++ b/docs/src/main/asciidoc/security-getting-started-tutorial.adoc
@@ -4,7 +4,7 @@ and pull requests should be submitted there:
https://github.com/quarkusio/quarkus/tree/main/docs/src/main/asciidoc
////
[id="security-getting-started-tutorial"]
-= Getting started with Security by using Basic authentication and Jakarta Persistence
+= Getting started with security by using Basic authentication and Jakarta Persistence
include::_attributes.adoc[]
:diataxis-type: tutorial
:categories: security,getting-started
@@ -158,6 +158,7 @@ endif::no-quarkus-security-jpa-reactive[]
Add a regular Jakarta REST resource to your Java source code, as shown in the following code snippet:
+
====
+.`src/main/java/org/acme/security/jpa/PublicResource.java`
[source,java]
----
package org.acme.security.jpa;
@@ -185,6 +186,7 @@ The source code for the `/api/admin` endpoint is similar, but instead, you use a
Add a Jakarta REST resource with the following `@RolesAllowed` annotation:
+
====
+.`src/main/java/org/acme/security/jpa/AdminResource.java`
[source,java]
----
package org.acme.security.jpa;
@@ -211,12 +213,12 @@ public class AdminResource {
Use `SecurityContext` to get access to the currently authenticated `Principal` user and to return their username, all of which is retrieved from the database.
+
====
+.`src/main/java/org/acme/security/jpa/UserResource.java`
[source,java]
----
package org.acme.security.jpa;
import jakarta.annotation.security.RolesAllowed;
-import jakarta.inject.Inject;
import jakarta.ws.rs.GET;
import jakarta.ws.rs.Path;
import jakarta.ws.rs.core.Context;
@@ -238,8 +240,9 @@ public class UserResource {
[[define-the-user-entity]]
== Define the user entity
-* You can now describe how you want security information to be stored in the model by adding annotations to the `user` entity, as outlined in the following code snippet:
+Specify how security information is stored in the model by adding the following annotations to the `user` entity:
+.`src/main/java/org/acme/security/jpa/User.java`
[source,java]
----
package org.acme.security.jpa;
@@ -316,10 +319,11 @@ When secure access is required, and no other authentication mechanisms are enabl
Therefore, in this tutorial, you do not need to set the property `quarkus.http.auth.basic` to `true`.
====
+
-. Configure at least one data source in the `application.properties` file so the `quarkus-security-jpa` extension can access your database.
+. Configure at least one data source in the `src/main/resources/application.properties` file so the `quarkus-security-jpa` extension can access your database.
For example:
+
====
+.src/main/resources/application.properties
[source,properties]
----
quarkus.http.auth.basic=true
@@ -327,7 +331,7 @@ quarkus.http.auth.basic=true
%prod.quarkus.datasource.db-kind=postgresql
%prod.quarkus.datasource.username=quarkus
%prod.quarkus.datasource.password=quarkus
-%prod.quarkus.datasource.jdbc.url=jdbc:postgresql:security_jpa
+%prod.quarkus.datasource.jdbc.url=jdbc:postgresql:quarkus
quarkus.hibernate-orm.database.generation=drop-and-create
----
@@ -344,6 +348,7 @@ ifndef::no-quarkus-security-jpa-reactive[]
* The URLs of Reactive datasources that are used by the `quarkus-security-jpa-reactive` extension are set with the `quarkus.datasource.reactive.url`
configuration property and not the `quarkus.datasource.jdbc.url` configuration property typically used by JDBC datasources.
+
+.src/main/resources/application.properties
[source,properties]
----
%prod.quarkus.datasource.reactive.url=vertx-reactive:postgresql://localhost:5431/security_jpa
@@ -356,6 +361,7 @@ Therefore, adjustments are needed in a production environment.
====
endif::no-quarkus-security-jpa-reactive[]
+.`src/main/java/org/acme/security/jpa/Startup.java`
[source,java]
----
package org.acme.security.jpa;
@@ -415,9 +421,11 @@ include::{includes}/devtools/dev.adoc[]
In this scenario, `Dev Services for PostgreSQL` launches and configures a `PostgreSQL` test container.
+Make sure that either `Podman` or `Docker` is installed on your computer.
To write the integration test, use the following code sample:
+.`src/test/java/org/acme/security/jpa/JpaSecurityRealmTest.java`
[source,java]
----
package org.acme.security.jpa;
@@ -486,6 +494,8 @@ public class JpaSecurityRealmTest {
As you can see in this code sample, you do not need to start the test container from the test code.
+To run these tests, choose the `Press [r] to resume testing` option, which is shown in the console after you start your application in dev mode.
+
[NOTE]
====
When you start your application in dev mode, Dev Services for PostgreSQL launches a PostgreSQL dev mode container so that you can start developing your application.
@@ -493,9 +503,17 @@ While developing your application, you can add and run tests individually by usi
Dev Services for PostgreSQL supports testing while you develop by providing a separate PostgreSQL test container that does not conflict with the dev mode container.
====
+Alternatively, you can run these tests using Maven:
+
+[source,bash,subs=attributes+]
+----
+./mvnw test
+----
+
== Test your application in production mode by using Curl or browser
-To test your application using Curl or the browser, you must first start a PostgreSQL server, then compile and run your application either in JVM or native mode.
+To test your application using Curl or a browser, start a PostgreSQL server first.
+Then, compile and run your application in either JVM or native mode.
=== Start the PostgreSQL server
diff --git a/docs/src/main/asciidoc/security-jwt-build.adoc b/docs/src/main/asciidoc/security-jwt-build.adoc
index 931ebdf339139..591788711d305 100644
--- a/docs/src/main/asciidoc/security-jwt-build.adoc
+++ b/docs/src/main/asciidoc/security-jwt-build.adoc
@@ -3,6 +3,7 @@ This guide is maintained in the main Quarkus repository
and pull requests should be submitted there:
https://github.com/quarkusio/quarkus/tree/main/docs/src/main/asciidoc
////
+[id="security-jwt-build"]
= Build, sign, and encrypt JSON Web Tokens
include::_attributes.adoc[]
:categories: security
@@ -79,11 +80,13 @@ JwtClaimsBuilder builder5 = Jwt.claims(token);
The API is fluent so you can initialize the builder as part of a fluent sequence.
The builder automatically sets the following claims if they are not explicitly configured:
+
- `iat` (issued at): Current time
- `exp` (expires at): Five minutes from the current time (customizable with the `smallrye.jwt.new-token.lifespan` property)
- `jti` (unique token identifier)
You can configure the following properties globally to avoid setting them directly in the builder:
+
- `smallrye.jwt.new-token.issuer`: Specifies the default issuer.
- `smallrye.jwt.new-token.audience`: Specifies the default audience.
diff --git a/docs/src/main/asciidoc/security-jwt.adoc b/docs/src/main/asciidoc/security-jwt.adoc
index 539c5961d57a5..d5f3ae002865a 100644
--- a/docs/src/main/asciidoc/security-jwt.adoc
+++ b/docs/src/main/asciidoc/security-jwt.adoc
@@ -14,7 +14,7 @@ include::_attributes.adoc[]
:extensions: io.quarkus:quarkus-smallrye-jwt
This guide explains how to integrate link:https://github.com/smallrye/smallrye-jwt/[SmallRye JWT] into your Quarkus application to implement link:https://tools.ietf.org/html/rfc7519[JSON Web Token (JWT)] security in compliance with the MicroProfile JWT specification.
-You’ll learn how to verify JWTs, represent them as MicroProfile JWT org.eclipse.microprofile.jwt.JsonWebToken, and secure Quarkus HTTP endpoints using bearer token authorization and link:https://en.wikipedia.org/wiki/Role-based_access_control[Role-Based Access Control].
+You’ll learn how to verify JWTs, represent them as MicroProfile JWT `org.eclipse.microprofile.jwt.JsonWebToken`, and secure Quarkus HTTP endpoints using bearer token authorization and link:https://en.wikipedia.org/wiki/Role-based_access_control[Role-Based Access Control].
[NOTE]
====
@@ -145,9 +145,9 @@ public class TokenSecuredResource {
<7> Builds a response containing the caller's name, the `isSecure()` and `getAuthenticationScheme()` states of the request `SecurityContext`, and whether a non-null `JsonWebToken` was injected.
[[run-application]]
-=== Run the application
+=== Run the application in dev mode
-Now you are ready to run our application. Use:
+Now, you are ready to run the application in dev mode by using one of the following commands:
include::{includes}/devtools/dev.adoc[]
@@ -174,6 +174,12 @@ Now that the REST endpoint is running, you can access it by using a command line
[source,shell]
----
$ curl http://127.0.0.1:8080/secured/permit-all; echo
+----
+
+This command returns the following response:
+
+[source,shell]
+----
hello anonymous, isHttps: false, authScheme: null, hasJWT: false
----
@@ -255,9 +261,6 @@ public class TokenSecuredResource {
<3> The `@RolesAllowed` annotation restricts access to users with either the "User" or "Admin" role.
<4> The response is constructed similarly to the `hello` method, with the addition of the `birthdate` claim retrieved directly from the injected `JsonWebToken`.
-
-
-
After you make this addition to your `TokenSecuredResource`, rerun the `./mvnw quarkus:dev` command, and then try `curl -v http://127.0.0.1:8080/secured/roles-allowed; echo` to attempt to access the new endpoint.
Your output should be as follows:
@@ -266,6 +269,12 @@ Your output should be as follows:
[source,shell]
----
$ curl -v http://127.0.0.1:8080/secured/roles-allowed; echo
+----
+
+This command returns the following response:
+
+[source,shell]
+----
* Trying 127.0.0.1...
* TCP_NODELAY set
* Connected to 127.0.0.1 (127.0.0.1) port 8080 (#0)
@@ -284,10 +293,13 @@ $ curl -v http://127.0.0.1:8080/secured/roles-allowed; echo
----
Excellent.
-You have not provided any JWT in the request, so you should not be able to access the endpoint, and you were not able to.
+You did not provide a JWT in the request, so access to the endpoint was correctly denied.
Instead, you received an HTTP 401 Unauthorized error.
-You need to obtain and pass in a valid JWT to access that endpoint.
-There are two steps to this, 1) configuring our {extension-name} extension with information on how to validate a JWT, and 2) generating a matching JWT with the appropriate claims.
+
+To access the endpoint, you must obtain and include a valid JWT in your request. This involves two steps:
+
+. Configuring the {extension-name} extension with the necessary information to validate a JWT.
+. Generating a JWT with the appropriate claims to match the configuration.
=== Configuring the {extension-name} extension security information
@@ -347,31 +359,35 @@ import java.util.Arrays;
import java.util.HashSet;
import org.eclipse.microprofile.jwt.Claims;
-
import io.smallrye.jwt.build.Jwt;
+/**
+ * A utility class to generate and print a JWT token string to stdout.
+ */
public class GenerateToken {
+
/**
- * Generate JWT token
+ * Generates and prints a JWT token.
*/
public static void main(String[] args) {
- String token =
- Jwt.issuer("https://example.com/issuer") // <1>
- .upn("jdoe@quarkus.io") // <2>
- .groups(new HashSet<>(Arrays.asList("User", "Admin"))) // <3>
- .claim(Claims.birthdate.name(), "2001-07-13") // <4>
- .sign();
+ String token = Jwt.issuer("https://example.com/issuer") // <1>
+ .upn("jdoe@quarkus.io") // <2>
+ .groups(new HashSet<>(Arrays.asList("User", "Admin"))) // <3>
+ .claim(Claims.birthdate.name(), "2001-07-13") // <4>
+ .sign();
+
System.out.println(token);
+ System.exit(0);
}
}
----
-<1> Set JWT issuer as an `iss` claim value.
-This must match the server side `mp.jwt.verify.issuer` for the token to be accepted as valid.
-<2> The `upn` claim is defined by the {mp-jwt} spec as the preferred claim to use for the `Principal` seen by the container security APIs.
-<3> The `group` claim provides the groups and top-level roles associated with the JWT bearer.
-<4> The `birthday` claim.
-It can be considered a sensitive claim, so consider encrypting the claims, as described in xref:security-jwt-build.adoc[Generate JWT tokens with SmallRye JWT].
+<1> Sets the `iss` (issuer) claim in the JWT.
+ This value must match the server-side `mp.jwt.verify.issuer` configuration for the token to be considered valid.
+<2> Specifies the `upn` (User Principal Name) claim, which the {mp-jwt} specification defines as the preferred claim for identifying the `Principal` in container security APIs.
+<3> Defines the `groups` claim, which provides the group memberships and top-level roles assigned to the JWT bearer.
+<4> Adds a `birthdate` claim.
+ Because this can be considered sensitive information, consider encrypting claims as described in xref:security-jwt-build.adoc[Generate JWT tokens with SmallRye JWT].
Note that for this code to work, you need the content of the RSA private key corresponding to the public key you have in the `TokenSecuredResource` application.
Take the following PEM content and place it into `security-jwt-quickstart/src/test/resources/privateKey.pem`:
@@ -444,27 +460,32 @@ Next, use the following command to generate the JWT:
[source,shell]
----
$ mvn exec:java -Dexec.mainClass=org.acme.security.jwt.GenerateToken -Dexec.classpathScope=test -Dsmallrye.jwt.sign.key.location=privateKey.pem
-
-eyJraWQiOiJcL3ByaXZhdGVLZXkucGVtIiwidHlwIjoiSldUIiwiYWxnIjoiUlMyNTYifQ.eyJzdWIiOiJqZG9lLXVzaW5nLWp3dC1yYmFjIiwiYXVkIjoidXNpbmctand0LXJiYWMiLCJ1cG4iOiJqZG9lQHF1YXJrdXMuaW8iLCJiaXJ0aGRhdGUiOiIyMDAxLTA3LTEzIiwiYXV0aF90aW1lIjoxNTUxNjU5Njc2LCJpc3MiOiJodHRwczpcL1wvcXVhcmt1cy5pb1wvdXNpbmctand0LXJiYWMiLCJyb2xlTWFwcGluZ3MiOnsiZ3JvdXAyIjoiR3JvdXAyTWFwcGVkUm9sZSIsImdyb3VwMSI6Ikdyb3VwMU1hcHBlZFJvbGUifSwiZ3JvdXBzIjpbIkVjaG9lciIsIlRlc3RlciIsIlN1YnNjcmliZXIiLCJncm91cDIiXSwicHJlZmVycmVkX3VzZXJuYW1lIjoiamRvZSIsImV4cCI6MTU1MTY1OTk3NiwiaWF0IjoxNTUxNjU5Njc2LCJqdGkiOiJhLTEyMyJ9.O9tx_wNNS4qdpFhxeD1e7v4aBNWz1FCq0UV8qmXd7dW9xM4hA5TO-ZREk3ApMrL7_rnX8z81qGPIo_R8IfHDyNaI1SLD56gVX-NaOLS2OjfcbO3zOWJPKR_BoZkYACtMoqlWgIwIRC-wJKUJU025dHZiNL0FWO4PjwuCz8hpZYXIuRscfFhXKrDX1fh3jDhTsOEFfu67ACd85f3BdX9pe-ayKSVLh_RSbTbBPeyoYPE59FW7H5-i8IE-Gqu838Hz0i38ksEJFI25eR-AJ6_PSUD0_-TV3NjXhF3bFIeT4VSaIZcpibekoJg0cQm-4ApPEcPLdgTejYHA-mupb8hSwg
----
-The JWT string is a Base64 URL encoded string with three parts separated by '.' characters.
-First part - JWT headers, second part - JWT claims, third part - JWT signature.
+The JWT string is a Base64 URL-encoded string consisting of three parts, separated by `.` characters:
+
+. The header, which contains metadata about the token, such as the signing algorithm.
+. The payload, also called the claims set, which contains the token's data.
+. The signature, which verifies the token's integrity.
=== Finally, secured access to `/secured/roles-allowed`
Now, let's use this to make a secured request to the `/secured/roles-allowed` endpoint.
Make sure you have the Quarkus server still running in dev mode, and then run the following command, making sure to use your version of the generated JWT from the previous step:
-[source,bash]
-----
-curl -H "Authorization: Bearer eyJraWQiOiJcL3ByaXZhdGVLZXkucGVtIiwidHlwIjoiSldUIiwiYWxnIjoiUlMyNTYifQ.eyJzdWIiOiJqZG9lLXVzaW5nLWp3dC1yYmFjIiwiYXVkIjoidXNpbmctand0LXJiYWMiLCJ1cG4iOiJqZG9lQHF1YXJrdXMuaW8iLCJiaXJ0aGRhdGUiOiIyMDAxLTA3LTEzIiwiYXV0aF90aW1lIjoxNTUxNjUyMDkxLCJpc3MiOiJodHRwczpcL1wvcXVhcmt1cy5pb1wvdXNpbmctand0LXJiYWMiLCJyb2xlTWFwcGluZ3MiOnsiZ3JvdXAyIjoiR3JvdXAyTWFwcGVkUm9sZSIsImdyb3VwMSI6Ikdyb3VwMU1hcHBlZFJvbGUifSwiZ3JvdXBzIjpbIkVjaG9lciIsIlRlc3RlciIsIlN1YnNjcmliZXIiLCJncm91cDIiXSwicHJlZmVycmVkX3VzZXJuYW1lIjoiamRvZSIsImV4cCI6MTU1MTY1MjM5MSwiaWF0IjoxNTUxNjUyMDkxLCJqdGkiOiJhLTEyMyJ9.aPA4Rlc4kw7n_OZZRRk25xZydJy_J_3BRR8ryYLyHTO1o68_aNWWQCgpnAuOW64svPhPnLYYnQzK-l2vHX34B64JySyBD4y_vRObGmdwH_SEufBAWZV7mkG3Y4mTKT3_4EWNu4VH92IhdnkGI4GJB6yHAEzlQI6EdSOa4Nq8Gp4uPGqHsUZTJrA3uIW0TbNshFBm47-oVM3ZUrBz57JKtr0e9jv0HjPQWyvbzx1HuxZd6eA8ow8xzvooKXFxoSFCMnxotd3wagvYQ9ysBa89bgzL-lhjWtusuMFDUVYwFqADE7oOSOD4Vtclgq8svznBQ-YpfTHfb9QEcofMlpyjNA" http://127.0.0.1:8080/secured/roles-allowed; echo
-----
.`curl` command for `/secured/roles-allowed` with JWT
[source,shell]
----
$ curl -H "Authorization: Bearer eyJraWQ..." http://127.0.0.1:8080/secured/roles-allowed; echo
+----
+
+Make sure to use the generated token as the HTTP Authorization Bearer scheme value.
+
+This command returns the following response:
+
+[source,shell]
+----
hello jdoe@quarkus.io, isHttps: false, authScheme: Bearer, hasJWT: true, birthdate: 2001-07-13
----
@@ -569,65 +590,64 @@ public class TokenSecuredResource {
Now generate the token again and run:
-[source,bash]
+[source,shell]
----
-curl -H "Authorization: Bearer eyJraWQiOiJcL3ByaXZhdGVLZXkucGVtIiwidHlwIjoiSldUIiwiYWxnIjoiUlMyNTYifQ.eyJzdWIiOiJqZG9lLXVzaW5nLWp3dC1yYmFjIiwiYXVkIjoidXNpbmctand0LXJiYWMiLCJ1cG4iOiJqZG9lQHF1YXJrdXMuaW8iLCJiaXJ0aGRhdGUiOiIyMDAxLTA3LTEzIiwiYXV0aF90aW1lIjoxNTUxNjUyMDkxLCJpc3MiOiJodHRwczpcL1wvcXVhcmt1cy5pb1wvdXNpbmctand0LXJiYWMiLCJyb2xlTWFwcGluZ3MiOnsiZ3JvdXAyIjoiR3JvdXAyTWFwcGVkUm9sZSIsImdyb3VwMSI6Ikdyb3VwMU1hcHBlZFJvbGUifSwiZ3JvdXBzIjpbIkVjaG9lciIsIlRlc3RlciIsIlN1YnNjcmliZXIiLCJncm91cDIiXSwicHJlZmVycmVkX3VzZXJuYW1lIjoiamRvZSIsImV4cCI6MTU1MTY1MjM5MSwiaWF0IjoxNTUxNjUyMDkxLCJqdGkiOiJhLTEyMyJ9.aPA4Rlc4kw7n_OZZRRk25xZydJy_J_3BRR8ryYLyHTO1o68_aNWWQCgpnAuOW64svPhPnLYYnQzK-l2vHX34B64JySyBD4y_vRObGmdwH_SEufBAWZV7mkG3Y4mTKT3_4EWNu4VH92IhdnkGI4GJB6yHAEzlQI6EdSOa4Nq8Gp4uPGqHsUZTJrA3uIW0TbNshFBm47-oVM3ZUrBz57JKtr0e9jv0HjPQWyvbzx1HuxZd6eA8ow8xzvooKXFxoSFCMnxotd3wagvYQ9ysBa89bgzL-lhjWtusuMFDUVYwFqADE7oOSOD4Vtclgq8svznBQ-YpfTHfb9QEcofMlpyjNA" http://127.0.0.1:8080/secured/roles-allowed-admin; echo
+$ curl -H "Authorization: Bearer eyJraWQ..." http://127.0.0.1:8080/secured/roles-allowed-admin; echo
----
+Make sure to use the generated token as the HTTP Authorization Bearer scheme value.
+
+This command returns the following response:
+
[source,shell]
----
-$ curl -H "Authorization: Bearer eyJraWQ..." http://127.0.0.1:8080/secured/roles-allowed-admin; echo
hello jdoe@quarkus.io, isHttps: false, authScheme: Bearer, hasJWT: true, birthdate: 2001-07-13
----
-=== Package and run the application
+=== Run the application in JVM mode
-As usual, the application can be packaged by using:
+You can run the application as a standard Java application.
+. Compile the application:
++
+====
include::{includes}/devtools/build.adoc[]
-
-And executed by using `java -jar target/quarkus-app/quarkus-run.jar`:
-
-.Runner jar example
-[source,shell,subs=attributes+]
+====
+. Run the application:
++
+====
+[source,bash]
----
-$ java -jar target/quarkus-app/quarkus-run.jar
-2019-03-28 14:27:48,839 INFO [io.quarkus] (main) Quarkus {quarkus-version} started in 0.796s. Listening on: http://[::]:8080
-2019-03-28 14:27:48,841 INFO [io.quarkus] (main) Installed features: [cdi, rest, rest-jackson, security, smallrye-jwt]
+java -jar target/quarkus-app/quarkus-run.jar
----
+====
-You can also generate the native executable with:
+=== Run the application in native mode
-include::{includes}/devtools/build-native.adoc[]
+You can compile this same demo into a native executable without any modifications.
+This means that you no longer need to install a JVM in your production environment.
+The runtime technology is included in the produced binary and optimized to run with minimal resources.
-.Native executable example
-[source,shell]
+Compilation takes a bit longer, so this step is disabled by default.
+
+. Build your application again by enabling the `native` profile:
++
+====
+
+include::{includes}/devtools/build-native.adoc[]
+====
+. Run the following binary directly:
++
+====
+[source,bash]
----
-[INFO] Scanning for projects...
-...
-[security-jwt-quickstart-runner:25602] universe: 493.17 ms
-[security-jwt-quickstart-runner:25602] (parse): 660.41 ms
-[security-jwt-quickstart-runner:25602] (inline): 1,431.10 ms
-[security-jwt-quickstart-runner:25602] (compile): 7,301.78 ms
-[security-jwt-quickstart-runner:25602] compile: 10,542.16 ms
-[security-jwt-quickstart-runner:25602] image: 2,797.62 ms
-[security-jwt-quickstart-runner:25602] write: 988.24 ms
-[security-jwt-quickstart-runner:25602] [total]: 43,778.16 ms
-[INFO] ------------------------------------------------------------------------
-[INFO] BUILD SUCCESS
-[INFO] ------------------------------------------------------------------------
-[INFO] Total time: 51.500 s
-[INFO] Finished at: 2019-03-28T14:30:56-07:00
-[INFO] ------------------------------------------------------------------------
-
-$ ./target/security-jwt-quickstart-runner
-2019-03-28 14:31:37,315 INFO [io.quarkus] (main) Quarkus 0.12.0 started in 0.006s. Listening on: http://[::]:8080
-2019-03-28 14:31:37,316 INFO [io.quarkus] (main) Installed features: [cdi, rest, rest-jackson, security, smallrye-jwt]
+./target/security-jwt-quickstart-1.0.0-SNAPSHOT-runner
----
+====
=== Explore the solution
-The `security-jwt-quickstart` link:{quickstarts-tree-url}/security-jwt-quickstart[directory] repository contains all the versions covered in this quickstart guide, along with additional endpoints that demonstrate subresources using injected `JsonWebToken`s and their claims via CDI APIs.
+The `security-jwt-quickstart` link:{quickstarts-tree-url}/security-jwt-quickstart[directory] contains all the versions covered in this quickstart guide, along with additional endpoints that demonstrate subresources using injected `JsonWebToken` tokens and their claims via CDI APIs.
We encourage you to explore the `security-jwt-quickstart` directory and review the quickstart solutions to learn more about the features of the {extension-name} extension.
@@ -1095,7 +1115,7 @@ SmallRye JWT provides more properties that can be used to customize the token pr
|`smallrye.jwt.token.kid`|none|Key identifier. The verification JWK key and every JWT token must have a matching `kid` header if it is set.
|`smallrye.jwt.time-to-live`|none|The maximum number of seconds a JWT can be issued for use. Effectively, the difference between the expiration date of the JWT and the issued at date must not exceed this value. Setting this property to a non-positive value relaxes the requirement for the token to have a valid 'iat' (issued at) claim.
|`smallrye.jwt.require.named-principal`|`true`|If an application relies on `java.security.Principal` returning a name, then a token must have a `upn` or `preferred_username` or `sub` claim set. Setting this property results in SmallRye JWT throwing an exception if none of these claims is available for the application code to deal with a non-null `Principal` name reliably.
-|`smallrye.jwt.path.sub`|none|Path to the claim containing the subject name. It starts from the top-level JSON object and can contain multiple segments where each segment only represents a JSON object name, for example, ' realms/subject`. This property can be used if a token has no 'sub' claim but has the subject set in a different claim. Use double quotes with the namespace-qualified claims.
+|`smallrye.jwt.path.sub`|none|Path to the claim containing the subject name. It starts from the top-level JSON object and can contain multiple segments where each segment only represents a JSON object name, for example, `realms/subject`. This property can be used if a token has no 'sub' claim but has the subject set in a different claim. Use double quotes with the namespace-qualified claims.
|`smallrye.jwt.claims.sub`|none| This property can set a default sub claim value when the current token has no standard or custom `sub` claim available. Effectively, this property can be used to customize the `java.security.Principal` name if no `upn` or `preferred_username` or `sub` claim is set.
|`smallrye.jwt.path.groups`|none|Path to the claim containing the groups. It starts from the top-level JSON object and can contain multiple segments where each segment represents a JSON object name only, for example: `realm/groups`. This property can be used if a token has no 'groups' claim but has the groups set in a different claim. Use double quotes with the namespace-qualified claims.
|`smallrye.jwt.groups-separator`|space|Separator for splitting a string which might contain multiple group values. It is only used if the `smallrye.jwt.path.groups` property points to a custom claim with a string value. The default value is a single space because a standard OAuth2 `scope` claim might contain a space-separated sequence.
@@ -1114,7 +1134,7 @@ SmallRye JWT provides more properties that can be used to customize the token pr
|`smallrye.jwt.client.tls.hosts`|none|Set of trusted hostnames. If the keys have to be fetched over `HTTPS` and `smallrye.jwt.client.tls.trust-all` is set to `false` then this property can be used to configure the trusted hostnames.
|`smallrye.jwt.http.proxy.host`|none|HTTP proxy host.
|`smallrye.jwt.http.proxy.port`|80|HTTP proxy port.
-|`smallrye.jwt.keystore.type`|`JKS`|This property can be used to customize a keystore type if either `mp.jwt.verify.publickey.location` or mp.jwt.decrypt.key.location` points to a `KeyStore` file. If it is not set, the file name is checked to determine the keystore type before defaulting to `JKS`.
+|`smallrye.jwt.keystore.type`|`JKS`|This property can be used to customize a keystore type if either `mp.jwt.verify.publickey.location` or `mp.jwt.decrypt.key.location` points to a `KeyStore` file. If it is not set, the file name is checked to determine the keystore type before defaulting to `JKS`.
|`smallrye.jwt.keystore.provider`||This property can be used to customize a `KeyStore` provider if `mp.jwt.verify.publickey.location` or `mp.jwt.decrypt.key.location` points to a `KeyStore` file.
|`smallrye.jwt.keystore.password`||Keystore password. If `mp.jwt.verify.publickey.location` or `mp.jwt.decrypt.key.location`, this property must be set.
|`smallrye.jwt.keystore.verify.key.alias`||This property has to be set to identify a public verification key which is extracted from `KeyStore` from a matching certificate if `mp.jwt.verify.publickey.location` points to a `KeyStore` file.
diff --git a/docs/src/main/asciidoc/security-keycloak-authorization.adoc b/docs/src/main/asciidoc/security-keycloak-authorization.adoc
index 12a3687e8ee71..58f8d5cdf6f9f 100644
--- a/docs/src/main/asciidoc/security-keycloak-authorization.adoc
+++ b/docs/src/main/asciidoc/security-keycloak-authorization.adoc
@@ -3,6 +3,7 @@ This guide is maintained in the main Quarkus repository.
To contribute, submit a pull request here:
https://github.com/quarkusio/quarkus/tree/main/docs/src/main/asciidoc
////
+[id="security-keycloak-authorization"]
= Using OpenID Connect (OIDC) and Keycloak to centralize authorization
include::_attributes.adoc[]
:diataxis-type: howto
@@ -15,7 +16,9 @@ Learn how to enable bearer token authorization in your Quarkus application by us
== Overview
-The `quarkus-keycloak-authorization` extension builds on the `quarkus-oidc` extension to offer advanced authorization capabilities. It includes a policy enforcer that dynamically regulates access to secured resources. Access is governed by permissions defined in Keycloak, supporting flexible and dynamic Resource-Based Access Control (RBAC).
+The Keycloak Authorization extension, `quarkus-keycloak-authorization`, extends the OpenID Connect extension, `quarkus-oidc`, to provide advanced authorization capabilities.
+It features a policy enforcer that dynamically manages access to secured resources.
+Access is governed by permissions defined in Keycloak, supporting flexible and dynamic Resource-Based Access Control (RBAC).
Use the `quarkus-keycloak-authorization` extension only if you are using Keycloak and Keycloak Authorization Services is enabled in your environment to handle authorization decisions.
@@ -23,11 +26,11 @@ If you are not using Keycloak, or if Keycloak is configured without Keycloak Aut
.How it works
-The `quarkus-keycloak-authorization` extension centralizes authorization responsibilities in Keycloak, enhancing security and simplifying application maintenance. The extension:
+The `quarkus-keycloak-authorization` extension centralizes authorization responsibilities in Keycloak, enhancing security and simplifying application maintenance:
-1. Uses the `quarkus-oidc` extension to verify bearer tokens.
-2. Sends verified tokens to Keycloak Authorization Services.
-3. Allows Keycloak to evaluate resource-based permissions dynamically, by using attributes such as resource name, identifier, or URI.
+- It uses the `quarkus-oidc` extension to verify bearer tokens.
+- It sends verified tokens to Keycloak Authorization Services.
+- It allows Keycloak to evaluate resource-based permissions dynamically by using attributes such as resource name, identifier, or URI.
By externalizing authorization decisions, you can:
@@ -36,7 +39,8 @@ By externalizing authorization decisions, you can:
.Compatibility
-This extension is compatible only with Quarkus xref:security-oidc-bearer-token-authentication.adoc[OIDC service applications]. It complements explicit mechanisms such as role-based access control with dynamic authorization policies.
+This extension is compatible only with Quarkus xref:security-oidc-bearer-token-authentication.adoc[OIDC service applications].
+It complements explicit mechanisms such as role-based access control with dynamic authorization policies.
.Key Features
@@ -48,8 +52,8 @@ This extension is compatible only with Quarkus xref:security-oidc-bearer-token-a
Before using this extension, ensure the following:
-1. Keycloak Authorization Services is enabled in your Keycloak instance.
-2. Your Quarkus application includes the `quarkus-keycloak-authorization` extension.
+. Keycloak Authorization Services is enabled in your Keycloak instance.
+. Your Quarkus application includes the `quarkus-keycloak-authorization` extension.
For detailed steps, see the xref:security-oidc-bearer-token-authentication.adoc[OIDC Bearer Token Authentication] guide.
@@ -75,7 +79,8 @@ This example demonstrates a simple microservice setup with two protected endpoin
.Token-based access control
-Access to these endpoints is controlled by using bearer tokens. To gain access, the following conditions must be met:
+Access to these endpoints is controlled by using bearer tokens.
+To gain access, the following conditions must be met:
- **Valid token**: The token must have a correct signature, a valid expiration date, and the appropriate audience.
- **Trust**: The microservice must trust the issuing Keycloak server.
@@ -91,8 +96,8 @@ For `/api/users/me`:
- **Access policy**: Open to users with a valid bearer token and the `user` role.
- **Response**: Returns user details as a JSON object derived from the token.
-
-Example response:
++
+.Example response
[source,json]
----
{
@@ -104,13 +109,15 @@ Example response:
}
----
+
For `/api/admin`:
- *Access policy*: Restricted to users with a valid bearer token and the `admin` role.
.Decoupled authorization
-This example highlights the use of role-based access control (RBAC) policies to protect resources. Key points include:
+This example highlights the use of role-based access control (RBAC) policies to protect resources.
+Key points include:
- *Policy flexibility*: Keycloak supports various policy types, such as attribute-based and custom policies, enabling fine-grained control.
- *Decoupled application logic*: Authorization policies are managed entirely by Keycloak, allowing your application to focus on its core functionality.
@@ -132,11 +139,12 @@ To get started, create a new project by using the following command:
:create-app-extensions: oidc,keycloak-authorization,rest-jackson
include::{includes}/devtools/create-app.adoc[]
-This command generates a new project with the `keycloak-authorization` extension. The extension integrates a Keycloak Adapter into your Quarkus application, providing the necessary capabilities to interact with a Keycloak server and perform bearer token authorization.
+This command generates a new project with the `keycloak-authorization` extension.
+The extension integrates a Keycloak Adapter into your Quarkus application, providing the necessary capabilities to interact with a Keycloak server and perform bearer token authorization.
.Adding extensions to an existing project
-If you already have an existing Quarkus project, you can add the `oidc` and `keycloak-authorization` extensions by running the following command in your project’s base directory:
+If you already have an existing Quarkus project, you can add the `oidc` and `keycloak-authorization` extensions by running the following command in your project's base directory:
:add-extension-extensions: oidc,keycloak-authorization
include::{includes}/devtools/extension-add.adoc[]
@@ -165,7 +173,8 @@ implementation("io.quarkus:quarkus-keycloak-authorization")
.Implementing the `/api/users/me` endpoint
-Start by implementing the `/api/users/me` endpoint. The following code defines a Jakarta REST resource that provides user details:
+Start by implementing the `/api/users/me` endpoint.
+The following code defines a Jakarta REST resource that provides user details:
[source,java]
----
@@ -209,7 +218,8 @@ public class UsersResource {
.Implementing the `/api/admin` endpoint
-Next, define the `/api/admin` endpoint. The following code represents a simple Jakarta REST resource protected with authentication:
+Next, define the `/api/admin` endpoint.
+The following code represents a simple Jakarta REST resource protected with authentication:
[source,java]
----
@@ -236,7 +246,8 @@ public class AdminResource {
.Role-based access control with Keycloak
-Notice that explicit annotations such as `@RolesAllowed` are not defined to enforce access control for the resources. Instead, the `keycloak-authorization` extension dynamically maps the URIs of protected resources in Keycloak.
+Notice that explicit annotations such as `@RolesAllowed` are not defined to enforce access control for the resources.
+Instead, the `keycloak-authorization` extension dynamically maps the URIs of protected resources in Keycloak.
Access control is managed as follows:
@@ -247,7 +258,8 @@ This decouples access control logic from the application code, making it easier
== Configuring the application
-You can use the OpenID Connect extension to configure the adapter settings through the `application.properties` file, typically located in the `src/main/resources` directory. Below is an example configuration:
+You can use the OpenID Connect extension to configure the adapter settings through the `application.properties` file, typically located in the `src/main/resources` directory.
+For example:
[source,properties]
----
@@ -267,18 +279,20 @@ quarkus.keycloak.devservices.realm-path=quarkus-realm.json <6>
<1> Specifies the URL of the Keycloak server and the realm used for authentication.
<2> Identifies the client application within the Keycloak realm.
<3> Defines the client secret for authentication with the Keycloak server.
-<4> Disables TLS verification for development purposes. Not recommended for production.
+<4> Disables TLS verification for development purposes; this is not recommended for production.
<5> Enables the Keycloak policy enforcer to manage access control based on defined permissions.
<6> Configures Dev Services to import a specified realm file, effective only in dev mode and not in JVM or native modes.
[NOTE]
====
-Adding the `%prod.` profile prefix to `quarkus.oidc.auth-server-url` ensures that Dev Services for Keycloak automatically launches a container in development mode. For more details, see the <> section.
+Adding the `%prod.` profile prefix to `quarkus.oidc.auth-server-url` ensures that Dev Services for Keycloak automatically launches a container in development mode.
+For more details, see the <<keycloak-dev-mode>> section.
====
[NOTE]
====
-By default, applications using the `quarkus-oidc` extension are treated as `service` type applications. However, the extension also supports `web-app` type applications under the following conditions:
+By default, applications using the `quarkus-oidc` extension are treated as `service` type applications.
+However, the extension also supports `web-app` type applications under the following conditions:
- The access token returned during the authorization code grant flow must be the source of roles (`quarkus.oidc.roles.source=accesstoken`).
- Note: For `web-app` type applications, ID token roles are checked by default.
@@ -309,21 +323,6 @@ docker run --name keycloak \
<1> For `keycloak.version`, ensure the version is `26.0.7` or later.
<2> For Keycloak keystore, use the `keycloak-keystore.jks` file located at https://github.com/quarkusio/quarkus-quickstarts/blob/main/security-keycloak-authorization-quickstart/config/keycloak-keystore.jks[quarkus-quickstarts/security-keycloak-authorization-quickstart/config].
-
-Try to access your Keycloak server at https://localhost:8543[localhost:8543].
-
-To access the Keycloak Administration Console, log in as the `admin` user.
-The username and password are both `admin`.
-
-Import the link:{quickstarts-tree-url}/security-keycloak-authorization-quickstart/config/quarkus-realm.json[realm configuration file] to create a new realm.
-For more details, see the Keycloak documentation about how to https://www.keycloak.org/docs/latest/server_admin/index.html#_create-realm[create a new realm].
-
-After importing the realm, you can see the resource permissions:
-
-image::keycloak-authorization-permissions.png[alt=Keycloak Authorization Permissions,role="center"]
-
-It explains why the endpoint has no `@RolesAllowed` annotations - the resource access permissions are set directly in Keycloak.
-
.Accessing the Keycloak server
. Open your browser and navigate to https://localhost:8543[https://localhost:8543].
@@ -333,7 +332,8 @@ It explains why the endpoint has no `@RolesAllowed` annotations - the resource a
.Importing the realm configuration
-To create a new realm, import the link:{quickstarts-tree-url}/security-keycloak-authorization-quickstart/config/quarkus-realm.json[realm configuration file]. For detailed steps on creating realms, refer to the Keycloak documentation: https://www.keycloak.org/docs/latest/server_admin/index.html#_create-realm[Create a new realm].
+To create a new realm, import the link:{quickstarts-tree-url}/security-keycloak-authorization-quickstart/config/quarkus-realm.json[realm configuration file].
+For detailed steps on creating realms, refer to the Keycloak documentation: https://www.keycloak.org/docs/latest/server_admin/index.html#_create-realm[Create a new realm].
After importing the realm, you can review the resource permissions:
@@ -341,7 +341,8 @@ image::keycloak-authorization-permissions.png[alt=Keycloak Authorization Permiss
.Role of Keycloak in resource permissions
-The resource access permissions are configured directly in Keycloak, which eliminates the need for `@RolesAllowed` annotations in your application code. This approach centralizes access control management within Keycloak, simplifying application maintenance and security updates.
+The resource access permissions are configured directly in Keycloak, which eliminates the need for `@RolesAllowed` annotations in your application code.
+This approach centralizes access control management within Keycloak, simplifying application maintenance and security updates.
[[keycloak-dev-mode]]
== Running the application in dev mode
@@ -352,12 +353,12 @@ include::{includes}/devtools/dev.adoc[]
xref:security-openid-connect-dev-services.adoc[Dev Services for Keycloak] starts a Keycloak container and imports the `quarkus-realm.json` configuration file.
-Open a xref:dev-ui.adoc[Dev UI] available at http://localhost:8080/q/dev-ui[/q/dev-ui] and click a `Provider: Keycloak` link in an `OpenID Connect` `Dev UI` card.
+Open a xref:dev-ui.adoc[Dev UI] available at http://localhost:8080/q/dev-ui[/q/dev-ui] and click a **Provider: Keycloak** link on an **OpenID Connect** card in the Dev UI.
.Interacting with Dev UI
. Open the xref:dev-ui.adoc[Dev UI] at http://localhost:8080/q/dev-ui[/q/dev-ui].
-. Click the `Provider: Keycloak` link within the `OpenID Connect` Dev UI card.
+. Click the **Provider: Keycloak** link within the **OpenID Connect** card in the Dev UI.
.Testing user permissions
@@ -374,17 +375,19 @@ When prompted to log in to a `Single Page Application` provided by `OpenID Conne
If you started Dev Services for Keycloak without importing a realm file such as link:{quickstarts-tree-url}/security-keycloak-authorization-quickstart/config/quarkus-realm.json[quarkus-realm.json], create a default `quarkus` realm without Keycloak authorization policies:
-. Select the `Keycloak Admin` link from the `OpenID Connect` Dev UI card.
-. Log in to the Keycloak admin console. The username and password are both `admin`.
+. Select the **Keycloak Admin** link from the **OpenID Connect** card in the Dev UI.
+. Log in to the Keycloak admin console.
+The username and password are both `admin`.
. Follow the instructions at link:https://www.keycloak.org/docs/latest/authorization_services/index.html[Keycloak Authorization Services documentation] to enable authorization policies in the `quarkus` realm.
-The `Keycloak Admin` link is easy to find in Dev UI:
+The **Keycloak Admin** link is easy to find in the Dev UI:
image::dev-ui-oidc-keycloak-card.png[alt=Dev UI OpenID Connect Card,role="center"]
.Adding custom JavaScript policies
-If your application uses Keycloak authorization configured with link:https://www.keycloak.org/docs/latest/authorization_services/index.html#_policy_js[JavaScript policies] that are deployed in a JAR archive, Dev Services for Keycloak can transfer this archive to the Keycloak container. Use the following properties in `application.properties` to configure the transfer:
+If your application uses Keycloak authorization configured with link:https://www.keycloak.org/docs/latest/authorization_services/index.html#_policy_js[JavaScript policies] that are deployed in a JAR archive, Dev Services for Keycloak can transfer this archive to the Keycloak container.
+Use the following properties in `application.properties` to configure the transfer:
[source,properties]
----
@@ -393,18 +396,19 @@ quarkus.keycloak.devservices.resource-aliases.policies=/policies.jar <1>
# Map the policies archive to a specific location in the container
quarkus.keycloak.devservices.resource-mappings.policies=/opt/keycloak/providers/policies.jar <2>
----
-<1> Creates a `policies` alias for the `/policies.jar` classpath resource. The policies archive can also be located on the file system.
+<1> Creates a `policies` alias for the `/policies.jar` classpath resource.
+The policies archive can also be located on the file system.
<2> Maps the policies archive to the `/opt/keycloak/providers/policies.jar` location inside the Keycloak container.
== Running the application in JVM mode
-After exploring the application in dev mode, you can run it as a standard Java application.
+After exploring the application in dev mode, you can run it as a standard Java application in JVM mode.
-First compile it:
+Compile the application:
include::{includes}/devtools/build.adoc[]
-Then run it:
+Run the application:
[source,bash]
----
@@ -413,15 +417,18 @@ java -jar target/quarkus-app/quarkus-run.jar
== Running the application in native mode
-This same demo can be compiled into native code; no modifications are required.
+You can compile this demo into native code; no modifications are required.
+
+Native compilation eliminates the need for a JVM in the production environment because the produced binary includes the runtime and is optimized for minimal resource usage.
-This implies that you no longer need to install a JVM on your production environment because the runtime technology is included in the produced binary and optimized to run with minimal resources.
+Compilation takes longer and is disabled by default.
+To build the application, enable the `native` profile.
-Compilation takes a bit longer, so this step is turned off by default; let's build again by enabling the `native` profile:
+Build the native binary:
include::{includes}/devtools/build-native.adoc[]
-After a while, you can run this binary directly:
+After a while, run the native binary:
[source,bash]
----
@@ -431,11 +438,14 @@ After a while, you can run this binary directly:
[[testing]]
== Testing the application
-See the preceding <<keycloak-dev-mode>> section about testing your application in a dev mode.
+See the preceding <<keycloak-dev-mode>> section for instructions on testing your application in development mode.
-You can test the application launched in JVM or native modes with `curl`.
+You can test the application running in JVM or native modes by using `curl`.
-The application uses bearer token authorization, and the first thing to do is obtain an access token from the Keycloak server to access the application resources:
+.Obtaining an access token
+
+The application uses bearer token authorization.
+To access its resources, first obtain an access token from the Keycloak server:
[source,bash]
----
@@ -449,7 +459,8 @@ export access_token=$(\
[NOTE]
====
-When the `quarkus.oidc.authentication.user-info-required` property is set to `true` to require that an access token is used to request `UserInfo`, you must add a `scope=openid` query parameter to the token grant request command, for example:
+If the `quarkus.oidc.authentication.user-info-required` property is set to `true`, the application requires that an access token is used to request `UserInfo`.
+In that case, you must add the `scope=openid` query parameter to the token grant request; for example:
[source,bash]
----
@@ -462,11 +473,11 @@ export access_token=$(\
----
====
-The preceding example obtains an access token for user `alice`.
+The preceding example obtains an access token for the user `alice`.
+
+.Accessing the `/api/users/me` endpoint
-Any user is allowed to access the
-`http://localhost:8080/api/users/me` endpoint,
-which returns a JSON payload with details about the user.
+Any user with a valid access token can access the `http://localhost:8080/api/users/me` endpoint, which returns a JSON payload with user details:
[source,bash]
----
@@ -475,17 +486,21 @@ curl -v -X GET \
-H "Authorization: Bearer "$access_token
----
-The `http://localhost:8080/api/admin` endpoint can only be accessed by users with the `admin` role.
-If you try to access this endpoint with the previously issued access token, you get a `403` response from the server.
+.Accessing the `/api/admin` endpoint
+
+The `http://localhost:8080/api/admin` endpoint is restricted to users with the `admin` role.
+If you try to access this endpoint with the previously issued access token, the server returns a `403 Forbidden` response:
[source,bash]
----
- curl -v -X GET \
- http://localhost:8080/api/admin \
- -H "Authorization: Bearer "$access_token
+curl -v -X GET \
+ http://localhost:8080/api/admin \
+ -H "Authorization: Bearer "$access_token
----
-To access the admin endpoint, get a token for the `admin` user:
+.Getting an admin access token
+
+To access the admin endpoint, get an access token for the `admin` user:
[source,bash]
----
@@ -499,8 +514,8 @@ export access_token=$(\
== Injecting the authorization client
-In some cases, using the link:https://www.keycloak.org/docs/latest/authorization_services/#_service_client_api[Keycloak Authorization Client Java API] is beneficial for tasks such as managing resources and obtaining permissions directly from Keycloak.
-For this purpose, you can inject an `AuthzClient` instance into your beans as follows:
+You can use the link:https://www.keycloak.org/docs/latest/authorization_services/#_service_client_api[Keycloak Authorization Client Java API] for advanced tasks, such as managing resources and getting permissions directly from Keycloak.
+To enable this functionality, inject an `AuthzClient` instance into your beans:
[source,java]
----
@@ -512,18 +527,24 @@ public class ProtectedResource {
NOTE: If you want to use the `AuthzClient` directly, set `quarkus.keycloak.policy-enforcer.enable=true`; otherwise, no bean is available for injection.
+[NOTE]
+====
+To use the `AuthzClient` directly, set `quarkus.keycloak.policy-enforcer.enable=true`.
+Otherwise, no bean is available for injection.
+====
+
== Mapping protected resources
-By default, the extension fetches resources on-demand from Keycloak, using their URI to identify and map the resources in your application that need to be protected.
+By default, the extension fetches resources from Keycloak on demand, using their URI to identify and map the application resources that require protection.
-To disable this on-demand fetching and instead pre-load resources at startup, apply the following configuration setting:
+To disable on-demand fetching and instead pre-load resources at startup, configure the following property:
[source,properties]
----
quarkus.keycloak.policy-enforcer.lazy-load-paths=false
----
-The time required to pre-load resources from Keycloak at startup varies based on their quantity, potentially affecting your application's initial load time."
+The time required to pre-load resources from Keycloak during startup depends on the number of resources, which might impact your application's initial load time.
== More about configuring protected resources
@@ -534,10 +555,10 @@ For more details, check the xref:security-overview.adoc[Quarkus Security overvie
== Access to public resources
-To enable access to a public resource without the `quarkus-keycloak-authorization` applying its policies, create a `permit` HTTP Policy configuration in `application.properties`.
+To allow access to a public resource without applying `quarkus-keycloak-authorization` policies, define a `permit` HTTP policy in the `application.properties` file.
For more information, see the xref:security-authorize-web-endpoints-reference.adoc[Authorization of web endpoints] guide.
-There's no need to deactivate policy checks for a Keycloak Authorization Policy with settings such as these:
+You do not need to disable policy checks for a Keycloak Authorization Policy when using configurations like the following:
[source,properties]
----
@@ -545,7 +566,7 @@ quarkus.keycloak.policy-enforcer.paths.1.paths=/api/public
quarkus.keycloak.policy-enforcer.paths.1.enforcement-mode=DISABLED
----
-To block access to the public resource to anonymous users, you can create an enforcing Keycloak Authorization Policy:
+To restrict access to public resources for anonymous users, define an enforcing Keycloak Authorization Policy:
[source,properties]
----
@@ -557,8 +578,8 @@ Only the default tenant configuration applies when controlling anonymous access
== Checking permission scopes programmatically
-In addition to resource permissions, you can specify method scopes.
-The scope usually represents an action that can be performed on a resource.
+In addition to resource permissions, you can define method scopes.
+A scope typically represents an action performed on a resource.
You can create an enforcing Keycloak Authorization Policy with a method scope.
For example:
@@ -574,10 +595,11 @@ quarkus.keycloak.policy-enforcer.paths.1.methods.get.scopes=read <1>
quarkus.keycloak.policy-enforcer.paths.2.name=Scope Permission Resource
quarkus.keycloak.policy-enforcer.paths.2.paths=/api/protected/programmatic-way,/api/protected/annotation-way
----
-<1> User must have resource permission 'Scope Permission Resource' and scope 'read'
+<1> User must have resource permission `Scope Permission Resource` and scope `read`
+
+The Keycloak Policy Enforcer secures the `/api/protected/standard-way` request path, removing the need for annotations such as `@RolesAllowed`.
+However, in some scenarios, you might need to perform a programmatic check.
-The Keycloak Policy Enforcer now secures the `/api/protected/standard-way` request path, eliminating the need for additional annotations such as `@RolesAllowed`.
-However, in certain scenarios, a programmatic check is necessary.
You can achieve this by injecting a `SecurityIdentity` instance into your beans, as shown in the following example.
Or, you can get the same result by annotating the resource method with `@PermissionsAllowed`.
The following example demonstrates three resource methods, each requiring the same `read` scope:
@@ -636,15 +658,14 @@ public class ProtectedResource {
}
}
----
-<1> Request sub-path `/standard-way` requires both resource permission and scope `read` according to the configuration properties we previously set in the `application.properties`.
-<2> Request sub-path `/programmatic-way` only requires permission `Scope Permission Resource`, but we can enforce scope with `SecurityIdentity#checkPermission`.
-<3> The `@PermissionsAllowed` annotation only grants access to the requests with permission `Scope Permission Resource` and scope `read`.
+<1> The `/standard-way` sub-path requires both the resource permission and the `read` scope, based on the configuration set in the `application.properties` file.
+<2> The `/programmatic-way` sub-path checks only for the `Scope Permission Resource` permission by default. However, you can enforce additional constraints, such as scope requirements, by using `SecurityIdentity#checkPermission`.
+<3> The `@PermissionsAllowed` annotation at `/annotation-way` restricts access to requests that have the `Scope Permission Resource` permission along with the `read` scope.
For more information, see the section xref:security-authorize-web-endpoints-reference.adoc#standard-security-annotations[Authorization using annotations] of the Security Authorization guide.
== Multi-tenancy
You can set up policy enforcer configurations for each tenant, similar to how it is done with xref:security-openid-connect-multitenancy.adoc[OpenID Connect (OIDC) multi-tenancy].
-
For example:
[source,properties]
@@ -689,10 +710,7 @@ quarkus.keycloak.webapp-tenant.policy-enforcer.paths.1.claim-information-point.c
== Dynamic tenant configuration resolution
-If you need a more dynamic configuration for the different tenants you want to support and don’t want to end up
-with multiple entries in your configuration file, you can use the `io.quarkus.keycloak.pep.TenantPolicyConfigResolver`.
-
-This interface allows you to dynamically create tenant configurations at runtime:
+To create configurations for multiple tenants while avoiding excessive entries in your configuration file, you can use the `io.quarkus.keycloak.pep.TenantPolicyConfigResolver` interface to define them programmatically at runtime.
[source,java]
----
@@ -742,9 +760,9 @@ public class CustomTenantPolicyConfigResolver implements TenantPolicyConfigResol
}
}
----
-<1> Create or update the `/enhanced-config` path in the default tenant config.
-<2> Add `/new-config` path into tenant config populated with documented configuration default values.
-<3> Use default static tenant configuration resolution based on the `application.properties` file and other SmallRye Config configuration sources.
+<1> Define or update the `/enhanced-config` path in the default tenant configuration.
+<2> Add the `/new-config` path to the tenant configuration, including custom claims and values that are populated programmatically.
+<3> Fallback to the default static tenant configuration resolution defined in the `application.properties` file or other SmallRye Config sources.
== Configuration reference
diff --git a/docs/src/main/asciidoc/security-oidc-auth0-tutorial.adoc b/docs/src/main/asciidoc/security-oidc-auth0-tutorial.adoc
index 66bdaa41cc42a..9d8bf00278459 100644
--- a/docs/src/main/asciidoc/security-oidc-auth0-tutorial.adoc
+++ b/docs/src/main/asciidoc/security-oidc-auth0-tutorial.adoc
@@ -708,7 +708,7 @@ import jakarta.ws.rs.Produces;
import jakarta.ws.rs.core.MediaType;
import org.eclipse.microprofile.rest.client.inject.RegisterRestClient;
-import io.quarkus.oidc.token.propagation.AccessToken;
+import io.quarkus.oidc.token.propagation.common.AccessToken;
@RegisterRestClient
@AccessToken <1>
diff --git a/docs/src/main/asciidoc/security-oidc-bearer-token-authentication-tutorial.adoc b/docs/src/main/asciidoc/security-oidc-bearer-token-authentication-tutorial.adoc
index fbcb7fd5fbd91..7d8f81e56e933 100644
--- a/docs/src/main/asciidoc/security-oidc-bearer-token-authentication-tutorial.adoc
+++ b/docs/src/main/asciidoc/security-oidc-bearer-token-authentication-tutorial.adoc
@@ -231,7 +231,16 @@ For more information, see the Keycloak documentation about link:https://www.keyc
ifndef::no-quarkus-keycloak-admin-client[]
[NOTE]
====
-If you want to use the Keycloak Admin Client to configure your server from your application, you need to include either the `quarkus-keycloak-admin-rest-client` or the `quarkus-keycloak-admin-resteasy-client` (if the application uses `quarkus-rest-client`) extension.
+To configure the Keycloak server from your application by using the Keycloak Admin Client, include one of the following extensions based on your setup:
+
+- *For Quarkus REST*: If you are using `quarkus-rest`, `quarkus-rest-client`, or both, include the `quarkus-keycloak-admin-rest-client` extension.
+
+- *For RESTEasy Classic*: If you are using `quarkus-resteasy`, `quarkus-resteasy-client`, or both, include the `quarkus-keycloak-admin-resteasy-client` extension.
+
+- *If no REST layer is explicitly used*: It is recommended to include the `quarkus-keycloak-admin-rest-client` extension.
+
+These guidelines ensure seamless integration of the Keycloak Admin Client with your REST framework, whether you are working with a REST server, a REST client, or both.
+
For more information, see the xref:security-keycloak-admin-client.adoc[Quarkus Keycloak Admin Client] guide.
====
endif::no-quarkus-keycloak-admin-client[]
diff --git a/docs/src/main/asciidoc/security-oidc-bearer-token-authentication.adoc b/docs/src/main/asciidoc/security-oidc-bearer-token-authentication.adoc
index 0e3c6a1bfe588..ab1ee4e5c4024 100644
--- a/docs/src/main/asciidoc/security-oidc-bearer-token-authentication.adoc
+++ b/docs/src/main/asciidoc/security-oidc-bearer-token-authentication.adoc
@@ -411,14 +411,28 @@ For example, if you work with Keycloak, you can use `keycloak.js` to authenticat
keycloak-spa
-
-
+
-
+