diff --git a/extensions-contrib/grpc-query/src/main/java/org/apache/druid/grpc/server/QueryDriver.java b/extensions-contrib/grpc-query/src/main/java/org/apache/druid/grpc/server/QueryDriver.java
index 096a1439a4fd..ca3aeb4e65ac 100644
--- a/extensions-contrib/grpc-query/src/main/java/org/apache/druid/grpc/server/QueryDriver.java
+++ b/extensions-contrib/grpc-query/src/main/java/org/apache/druid/grpc/server/QueryDriver.java
@@ -49,6 +49,7 @@
import org.apache.druid.server.QueryLifecycleFactory;
import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.ForbiddenException;
import org.apache.druid.sql.DirectStatement;
import org.apache.druid.sql.DirectStatement.ResultSet;
@@ -146,8 +147,8 @@ private QueryResponse runNativeQuery(QueryRequest request, AuthenticationResult
final String currThreadName = Thread.currentThread().getName();
try {
queryLifecycle.initialize(query);
- Access authorizationResult = queryLifecycle.authorize(authResult);
- if (!authorizationResult.isAllowed()) {
+ AuthorizationResult authorizationResult = queryLifecycle.authorize(authResult);
+ if (!authorizationResult.allowAccessWithNoRestriction()) {
throw new ForbiddenException(Access.DEFAULT_ERROR_MESSAGE);
}
queryResponse = queryLifecycle.execute();
diff --git a/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/BasicSecurityResourceFilter.java b/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/BasicSecurityResourceFilter.java
index bcb4ec053457..eade42320c7d 100644
--- a/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/BasicSecurityResourceFilter.java
+++ b/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/BasicSecurityResourceFilter.java
@@ -23,7 +23,7 @@
import com.sun.jersey.spi.container.ContainerRequest;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.server.http.security.AbstractResourceFilter;
-import org.apache.druid.server.security.Access;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.Resource;
@@ -54,17 +54,17 @@ public ContainerRequest filter(ContainerRequest request)
getAction(request)
);
- final Access authResult = AuthorizationUtils.authorizeResourceAction(
+ final AuthorizationResult authResult = AuthorizationUtils.authorizeResourceAction(
getReq(),
resourceAction,
getAuthorizerMapper()
);
- if (!authResult.isAllowed()) {
+ if (!authResult.allowAccessWithNoRestriction()) {
throw new WebApplicationException(
Response.status(Response.Status.FORBIDDEN)
.type(MediaType.TEXT_PLAIN)
- .entity(StringUtils.format("Access-Check-Result: %s", authResult.toString()))
+ .entity(StringUtils.format("Access-Check-Result: %s", authResult.getErrorMessage()))
.build()
);
}
diff --git a/extensions-core/druid-catalog/src/main/java/org/apache/druid/catalog/http/CatalogResource.java b/extensions-core/druid-catalog/src/main/java/org/apache/druid/catalog/http/CatalogResource.java
index 2590f57c8b38..c206266f5f12 100644
--- a/extensions-core/druid-catalog/src/main/java/org/apache/druid/catalog/http/CatalogResource.java
+++ b/extensions-core/druid-catalog/src/main/java/org/apache/druid/catalog/http/CatalogResource.java
@@ -34,8 +34,8 @@
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.Pair;
import org.apache.druid.java.util.common.StringUtils;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -56,7 +56,6 @@
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
-
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -108,17 +107,17 @@ public CatalogResource(
*
*
* @param schemaName The name of the Druid schema, which must be writable
- * and the user must have at least read access.
- * @param tableName The name of the table definition to modify. The user must
- * have write access to the table.
- * @param spec The new table definition.
- * @param version the expected version of an existing table. The version must
- * match. If not (or if the table does not exist), returns an error.
- * @param overwrite if {@code true}, then overwrites any existing table.
- * If {@code false}, then the operation fails if the table already exists.
- * Ignored if a version is specified.
- * @param req the HTTP request used for authorization.
- */
+ * and the user must have at least read access.
+ * @param tableName The name of the table definition to modify. The user must
+ * have write access to the table.
+ * @param spec The new table definition.
+ * @param version the expected version of an existing table. The version must
+ * match. If not (or if the table does not exist), returns an error.
+ * @param overwrite if {@code true}, then overwrites any existing table.
+ * If {@code false}, then the operation fails if the table already exists.
+ * Ignored if a version is specified.
+ * @param req the HTTP request used for authorization.
+ */
@POST
@Path("/schemas/{schema}/tables/{name}")
@Consumes(MediaType.APPLICATION_JSON)
@@ -181,9 +180,9 @@ public Response postTable(
* the definition is created before the datasource itself.)
*
* @param schemaName The Druid schema. The user must have read access.
- * @param tableName The name of the table within the schema. The user must have
- * read access.
- * @param req the HTTP request used for authorization.
+ * @param tableName The name of the table within the schema. The user must have
+ * read access.
+ * @param req the HTTP request used for authorization.
* @return the definition for the table, if any.
*/
@GET
@@ -211,8 +210,8 @@ public Response getTable(
* for the given schema and table.
*
* @param schemaName The name of the schema that holds the table.
- * @param tableName The name of the table definition to delete. The user must have
- * write access.
+ * @param tableName The name of the table definition to delete. The user must have
+ * write access.
*/
@DELETE
@Path("/schemas/{schema}/tables/{name}")
@@ -247,9 +246,9 @@ public Response deleteTable(
* the table spec changed between the time it was retrieve and the edit operation
* is submitted.
*
- * @param schemaName The name of the schema that holds the table.
- * @param tableName The name of the table definition to delete. The user must have
- * write access.
+ * @param schemaName The name of the schema that holds the table.
+ * @param tableName The name of the table definition to delete. The user must have
+ * write access.
* @param editRequest The operation to perform. See the classes for details.
*/
@POST
@@ -281,7 +280,7 @@ public Response editTable(
* Retrieves the list of all Druid schema names.
*
* @param format the format of the response. See the code for the
- * available formats
+ * available formats
*/
@GET
@Path("/schemas")
@@ -318,9 +317,9 @@ public Response getSchemas(
* the read-only schemas, there will be no table definitions.
*
* @param schemaName The name of the Druid schema to query. The user must
- * have read access.
- * @param format the format of the response. See the code for the
- * available formats
+ * have read access.
+ * @param format the format of the response. See the code for the
+ * available formats
*/
@GET
@Path("/schemas/{schema}/tables")
@@ -360,7 +359,7 @@ public Response getSchemaTables(
* table definitions known to the catalog. Used to prime a cache on first access.
* After that, the Coordinator will push updates to Brokers. Returns the full
* list of table details.
- *
+ *
* It is expected that the number of table definitions will be of small or moderate
* size, so no provision is made to handle very large lists.
*/
@@ -467,9 +466,9 @@ private Response listAllTableMetadata(final HttpServletRequest req)
List<Pair<SchemaSpec, TableMetadata>> tables = new ArrayList<>();
for (SchemaSpec schema : catalog.schemaRegistry().schemas()) {
tables.addAll(catalog.tables().tablesInSchema(schema.name())
- .stream()
- .map(table -> Pair.of(schema, table))
- .collect(Collectors.toList()));
+ .stream()
+ .map(table -> Pair.of(schema, table))
+ .collect(Collectors.toList()));
}
Iterable<Pair<SchemaSpec, TableMetadata>> filtered = AuthorizationUtils.filterAuthorizedResources(
@@ -483,9 +482,9 @@ private Response listAllTableMetadata(final HttpServletRequest req)
);
List<TableMetadata> metadata = Lists.newArrayList(filtered)
- .stream()
- .map(pair -> pair.rhs)
- .collect(Collectors.toList());
+ .stream()
+ .map(pair -> pair.rhs)
+ .collect(Collectors.toList());
return Response.ok().entity(metadata).build();
}
@@ -499,9 +498,9 @@ private Response tableNamesInSchema(
req,
tables,
name ->
- Collections.singletonList(
- resourceAction(schema, name, Action.READ)),
- authorizerMapper
+ Collections.singletonList(
+ resourceAction(schema, name, Action.READ)),
+ authorizerMapper
);
return Response.ok().entity(Lists.newArrayList(filtered)).build();
}
@@ -581,13 +580,13 @@ private void authorizeTable(
private void authorize(String resource, String key, Action action, HttpServletRequest request)
{
- final Access authResult = authorizeAccess(resource, key, action, request);
- if (!authResult.isAllowed()) {
- throw new ForbiddenException(authResult.toString());
+ final AuthorizationResult authResult = authorizeAccess(resource, key, action, request);
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
}
- private Access authorizeAccess(String resource, String key, Action action, HttpServletRequest request)
+ private AuthorizationResult authorizeAccess(String resource, String key, Action action, HttpServletRequest request)
{
return AuthorizationUtils.authorizeResourceAction(
request,
diff --git a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/dart/controller/http/DartSqlResource.java b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/dart/controller/http/DartSqlResource.java
index 65d770a29c55..a277d7d126ff 100644
--- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/dart/controller/http/DartSqlResource.java
+++ b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/dart/controller/http/DartSqlResource.java
@@ -36,9 +36,9 @@
import org.apache.druid.server.DruidNode;
import org.apache.druid.server.ResponseContextConfig;
import org.apache.druid.server.initialization.ServerConfig;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.Resource;
@@ -144,7 +144,7 @@ public GetQueriesResponse doGetRunningQueries(
)
{
final AuthenticationResult authenticationResult = AuthorizationUtils.authenticationResultFromRequest(req);
- final Access stateReadAccess = AuthorizationUtils.authorizeAllResourceActions(
+ final AuthorizationResult stateReadAccess = AuthorizationUtils.authorizeAllResourceActions(
authenticationResult,
Collections.singletonList(new ResourceAction(Resource.STATE_RESOURCE, Action.READ)),
authorizerMapper
@@ -175,7 +175,7 @@ public GetQueriesResponse doGetRunningQueries(
queries.sort(Comparator.comparing(DartQueryInfo::getStartTime).thenComparing(DartQueryInfo::getDartQueryId));
final GetQueriesResponse response;
- if (stateReadAccess.isAllowed()) {
+ if (stateReadAccess.allowAccessWithNoRestriction()) {
// User can READ STATE, so they can see all running queries, as well as authentication details.
response = new GetQueriesResponse(queries);
} else {
@@ -245,9 +245,9 @@ public Response cancelQuery(
return Response.status(Response.Status.ACCEPTED).build();
}
- final Access access = authorizeCancellation(req, cancelables);
+ final AuthorizationResult authResult = authorizeCancellation(req, cancelables);
- if (access.isAllowed()) {
+ if (authResult.allowAccessWithNoRestriction()) {
sqlLifecycleManager.removeAll(sqlQueryId, cancelables);
// Don't call cancel() on the cancelables. That just cancels native queries, which is useless here. Instead,
diff --git a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/dart/controller/sql/DartQueryMaker.java b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/dart/controller/sql/DartQueryMaker.java
index c35ca39a9f04..4526754cf573 100644
--- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/dart/controller/sql/DartQueryMaker.java
+++ b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/dart/controller/sql/DartQueryMaker.java
@@ -52,6 +52,7 @@
import org.apache.druid.msq.sql.MSQTaskQueryMaker;
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.server.QueryResponse;
+import org.apache.druid.server.security.ForbiddenException;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.DruidQuery;
import org.apache.druid.sql.calcite.run.QueryMaker;
@@ -127,6 +128,9 @@ public DartQueryMaker(
@Override
public QueryResponse runQuery(DruidQuery druidQuery)
{
+ if (!plannerContext.getAuthorizationResult().allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(plannerContext.getAuthorizationResult().getErrorMessage());
+ }
final MSQSpec querySpec = MSQTaskQueryMaker.makeQuerySpec(
null,
druidQuery,
diff --git a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/rpc/MSQResourceUtils.java b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/rpc/MSQResourceUtils.java
index 8820b4ead5a0..ade376066138 100644
--- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/rpc/MSQResourceUtils.java
+++ b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/rpc/MSQResourceUtils.java
@@ -19,7 +19,7 @@
package org.apache.druid.msq.rpc;
-import org.apache.druid.server.security.Access;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -41,10 +41,14 @@ public static void authorizeAdminRequest(
{
final List<ResourceAction> resourceActions = permissionMapper.getAdminPermissions();
- Access access = AuthorizationUtils.authorizeAllResourceActions(request, resourceActions, authorizerMapper);
+ AuthorizationResult authResult = AuthorizationUtils.authorizeAllResourceActions(
+ request,
+ resourceActions,
+ authorizerMapper
+ );
- if (!access.isAllowed()) {
- throw new ForbiddenException(access.toString());
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
}
@@ -57,10 +61,14 @@ public static void authorizeQueryRequest(
{
final List<ResourceAction> resourceActions = permissionMapper.getQueryPermissions(queryId);
- Access access = AuthorizationUtils.authorizeAllResourceActions(request, resourceActions, authorizerMapper);
+ AuthorizationResult authResult = AuthorizationUtils.authorizeAllResourceActions(
+ request,
+ resourceActions,
+ authorizerMapper
+ );
- if (!access.isAllowed()) {
- throw new ForbiddenException(access.toString());
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
}
}
diff --git a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskQueryMaker.java b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskQueryMaker.java
index 5462b9917376..9f69396edcfc 100644
--- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskQueryMaker.java
+++ b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskQueryMaker.java
@@ -54,6 +54,7 @@
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.server.QueryResponse;
import org.apache.druid.server.lookup.cache.LookupLoadingSpec;
+import org.apache.druid.server.security.ForbiddenException;
import org.apache.druid.sql.calcite.parser.DruidSqlIngest;
import org.apache.druid.sql.calcite.parser.DruidSqlInsert;
import org.apache.druid.sql.calcite.parser.DruidSqlReplace;
@@ -116,6 +117,9 @@ public class MSQTaskQueryMaker implements QueryMaker
@Override
public QueryResponse runQuery(final DruidQuery druidQuery)
{
+ if (!plannerContext.getAuthorizationResult().allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(plannerContext.getAuthorizationResult().getErrorMessage());
+ }
Hook.QUERY_PLAN.run(druidQuery.getQuery());
plannerContext.dispatchHook(DruidHook.NATIVE_PLAN, druidQuery.getQuery());
diff --git a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/resources/SqlStatementResource.java b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/resources/SqlStatementResource.java
index c92bfa955fb6..e26969c67619 100644
--- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/resources/SqlStatementResource.java
+++ b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/resources/SqlStatementResource.java
@@ -74,9 +74,9 @@
import org.apache.druid.rpc.HttpResponseException;
import org.apache.druid.rpc.indexing.OverlordClient;
import org.apache.druid.server.QueryResponse;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -484,7 +484,13 @@ private Response buildTaskResponse(Sequence sequence, AuthenticationRe
}
String taskId = String.valueOf(firstRow[0]);
- Optional<SqlStatementResult> statementResult = getStatementStatus(taskId, authenticationResult, true, Action.READ, false);
+ Optional<SqlStatementResult> statementResult = getStatementStatus(
+ taskId,
+ authenticationResult,
+ true,
+ Action.READ,
+ false
+ );
if (statementResult.isPresent()) {
return Response.status(Response.Status.OK).entity(statementResult.get()).build();
@@ -585,7 +591,11 @@ private Optional getStatementStatus(
}
// since we need the controller payload for auth checks.
- MSQControllerTask msqControllerTask = getMSQControllerTaskAndCheckPermission(queryId, authenticationResult, forAction);
+ MSQControllerTask msqControllerTask = getMSQControllerTaskAndCheckPermission(
+ queryId,
+ authenticationResult,
+ forAction
+ );
SqlStatementState sqlStatementState = SqlStatementResourceHelper.getSqlStatementState(statusPlus);
MSQTaskReportPayload taskReportPayload = null;
@@ -640,9 +650,9 @@ private Optional getStatementStatus(
* necessary permissions. A user has the necessary permissions if one of the following criteria is satisfied:
* 1. The user is the one who submitted the query
* 2. The user belongs to a role containing the READ or WRITE permissions over the STATE resource. For endpoints like GET,
- * the user should have READ permission for the STATE resource, while for endpoints like DELETE, the user should
- * have WRITE permission for the STATE resource. (Note: POST API does not need to check the state permissions since
- * the currentUser always equal to the queryUser)
+ * the user should have READ permission for the STATE resource, while for endpoints like DELETE, the user should
+ * have WRITE permission for the STATE resource. (Note: POST API does not need to check the state permissions since
+ * the currentUser always equal to the queryUser)
*/
private MSQControllerTask getMSQControllerTaskAndCheckPermission(
String queryId,
@@ -665,21 +675,21 @@ private MSQControllerTask getMSQControllerTaskAndCheckPermission(
return msqControllerTask;
}
- Access access = AuthorizationUtils.authorizeAllResourceActions(
+ AuthorizationResult authResult = AuthorizationUtils.authorizeAllResourceActions(
authenticationResult,
Collections.singletonList(new ResourceAction(Resource.STATE_RESOURCE, forAction)),
authorizerMapper
);
- if (access.isAllowed()) {
- return msqControllerTask;
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(StringUtils.format(
+ "The current user[%s] cannot view query id[%s] since the query is owned by another user",
+ currentUser,
+ queryId
+ ));
}
- throw new ForbiddenException(StringUtils.format(
- "The current user[%s] cannot view query id[%s] since the query is owned by another user",
- currentUser,
- queryId
- ));
+ return msqControllerTask;
}
/**
@@ -990,7 +1000,11 @@ private T contactOverlord(final ListenableFuture future, String queryId)
private static DruidException queryNotFoundException(String queryId)
{
- return NotFound.exception("Query [%s] was not found. The query details are no longer present or might not be of the type [%s]. Verify that the id is correct.", queryId, MSQControllerTask.TYPE);
+ return NotFound.exception(
+ "Query [%s] was not found. The query details are no longer present or might not be of the type [%s]. Verify that the id is correct.",
+ queryId,
+ MSQControllerTask.TYPE
+ );
}
}
diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/dart/controller/http/DartSqlResourceTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/dart/controller/http/DartSqlResourceTest.java
index 981f96fbe2a8..10b5f20e4187 100644
--- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/dart/controller/http/DartSqlResourceTest.java
+++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/dart/controller/http/DartSqlResourceTest.java
@@ -509,6 +509,61 @@ public void test_doPost_regularUser_forbidden()
);
}
+ @Test
+ public void test_doPost_regularUser_restricted_throwsForbidden()
+ {
+ final MockAsyncContext asyncContext = new MockAsyncContext();
+ final MockHttpServletResponse asyncResponse = new MockHttpServletResponse();
+ asyncContext.response = asyncResponse;
+
+ Mockito.when(httpServletRequest.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT))
+ .thenReturn(makeAuthenticationResult(REGULAR_USER_NAME));
+ Mockito.when(httpServletRequest.startAsync())
+ .thenReturn(asyncContext);
+
+ final SqlQuery sqlQuery = new SqlQuery(
+ StringUtils.format("SELECT * FROM \"%s\"", CalciteTests.RESTRICTED_DATASOURCE),
+ ResultFormat.ARRAY,
+ false,
+ false,
+ false,
+ Collections.emptyMap(),
+ Collections.emptyList()
+ );
+
+ ForbiddenException e = Assertions.assertThrows(
+ ForbiddenException.class,
+ () -> sqlResource.doPost(sqlQuery, httpServletRequest)
+ );
+ Assertions.assertEquals("Unauthorized", e.getMessage());
+ }
+
+ @Test
+ public void test_doPost_superUser_restricted_throwsServerError()
+ {
+ final MockAsyncContext asyncContext = new MockAsyncContext();
+ final MockHttpServletResponse asyncResponse = new MockHttpServletResponse();
+ asyncContext.response = asyncResponse;
+
+ Mockito.when(httpServletRequest.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT))
+ .thenReturn(makeAuthenticationResult(CalciteTests.TEST_SUPERUSER_NAME));
+ Mockito.when(httpServletRequest.startAsync())
+ .thenReturn(asyncContext);
+
+ final SqlQuery sqlQuery = new SqlQuery(
+ StringUtils.format("SELECT * FROM \"%s\"", CalciteTests.RESTRICTED_DATASOURCE),
+ ResultFormat.ARRAY,
+ false,
+ false,
+ false,
+ Collections.emptyMap(),
+ Collections.emptyList()
+ );
+ Assertions.assertNull(sqlResource.doPost(sqlQuery, httpServletRequest));
+ // Super user can run a dart query, but we don't support it yet.
+ Assertions.assertEquals(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), asyncResponse.getStatus());
+ }
+
@Test
public void test_doPost_regularUser_runtimeError() throws IOException
{
@@ -571,7 +626,9 @@ public void test_doPost_regularUser_fullReport() throws Exception
final List<Map<String, Object>> reportMaps = objectMapper.readValue(
asyncResponse.baos.toByteArray(),
- new TypeReference<>() {}
+ new TypeReference<>()
+ {
+ }
);
Assertions.assertEquals(1, reportMaps.size());
@@ -610,7 +667,9 @@ public void test_doPost_regularUser_runtimeError_fullReport() throws Exception
final List<Map<String, Object>> reportMaps = objectMapper.readValue(
asyncResponse.baos.toByteArray(),
- new TypeReference<>() {}
+ new TypeReference<>()
+ {
+ }
);
Assertions.assertEquals(1, reportMaps.size());
diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java
index 39df97297162..130de0f60aaf 100644
--- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java
+++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java
@@ -74,6 +74,7 @@
import org.apache.druid.segment.join.JoinType;
import org.apache.druid.segment.virtual.ExpressionVirtualColumn;
import org.apache.druid.server.lookup.cache.LookupLoadingSpec;
+import org.apache.druid.server.security.ForbiddenException;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.external.ExternalDataSource;
import org.apache.druid.sql.calcite.filtration.Filtration;
@@ -795,6 +796,20 @@ public void testSelectWithGroupByLimit(String contextName, Map c
}
+ @MethodSource("data")
+ @ParameterizedTest(name = "{index}:with context {0}")
+ public void testSelectRestricted(String contextName, Map<String, Object> context)
+ {
+ testSelectQuery()
+ .setSql("select count(*) from druid.restrictedDatasource_m1_is_6")
+ .setQueryContext(context)
+ .setExpectedExecutionErrorMatcher(CoreMatchers.allOf(
+ CoreMatchers.instanceOf(ForbiddenException.class),
+ ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString("Unauthorized"))
+ ))
+ .verifyExecutionError();
+ }
+
@MethodSource("data")
@ParameterizedTest(name = "{index}:with context {0}")
public void testSelectLookup(String contextName, Map<String, Object> context)
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/IndexTaskUtils.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/IndexTaskUtils.java
index 79a3e8993a8c..1d0f2622ce30 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/IndexTaskUtils.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/IndexTaskUtils.java
@@ -27,8 +27,8 @@
import org.apache.druid.java.util.emitter.service.ServiceMetricEvent;
import org.apache.druid.query.DruidMetrics;
import org.apache.druid.segment.incremental.ParseExceptionReport;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -67,7 +67,7 @@ public static List getReportListFromSavedParseExceptions(
*
* @return authorization result
*/
- public static Access datasourceAuthorizationCheck(
+ public static AuthorizationResult datasourceAuthorizationCheck(
final HttpServletRequest req,
Action action,
String datasource,
@@ -79,12 +79,11 @@ public static Access datasourceAuthorizationCheck(
action
);
- Access access = AuthorizationUtils.authorizeResourceAction(req, resourceAction, authorizerMapper);
- if (!access.isAllowed()) {
- throw new ForbiddenException(access.toString());
+ AuthorizationResult authResult = AuthorizationUtils.authorizeResourceAction(req, resourceAction, authorizerMapper);
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
-
- return access;
+ return authResult;
}
public static void setTaskDimensions(final ServiceMetricEvent.Builder metricBuilder, final Task task)
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/OverlordResource.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/OverlordResource.java
index fc2b00ad6f58..74656dfdb5f1 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/OverlordResource.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/OverlordResource.java
@@ -66,9 +66,9 @@
import org.apache.druid.server.http.security.ConfigResourceFilter;
import org.apache.druid.server.http.security.DatasourceResourceFilter;
import org.apache.druid.server.http.security.StateResourceFilter;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
import org.apache.druid.server.security.AuthConfig;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -177,14 +177,13 @@ public Response taskPost(
.build();
}
- Access authResult = AuthorizationUtils.authorizeAllResourceActions(
+ AuthorizationResult authResult = AuthorizationUtils.authorizeAllResourceActions(
req,
resourceActions,
authorizerMapper
);
-
- if (!authResult.isAllowed()) {
- throw new ForbiddenException(authResult.getMessage());
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
return asLeaderWith(
@@ -609,16 +608,17 @@ public Response getTasks(
new Resource(dataSource, ResourceType.DATASOURCE),
Action.READ
);
- final Access authResult = AuthorizationUtils.authorizeResourceAction(
+ final AuthorizationResult authResult = AuthorizationUtils.authorizeResourceAction(
req,
resourceAction,
authorizerMapper
);
- if (!authResult.isAllowed()) {
+
+ if (!authResult.allowAccessWithNoRestriction()) {
throw new WebApplicationException(
Response.status(Response.Status.FORBIDDEN)
.type(MediaType.TEXT_PLAIN)
- .entity(StringUtils.format("Access-Check-Result: %s", authResult.toString()))
+ .entity(StringUtils.format("Access-Check-Result: %s", authResult.getErrorMessage()))
.build()
);
}
@@ -654,7 +654,7 @@ public Response killPendingSegments(
{
final Interval deleteInterval = Intervals.of(deleteIntervalString);
// check auth for dataSource
- final Access authResult = AuthorizationUtils.authorizeAllResourceActions(
+ final AuthorizationResult authResult = AuthorizationUtils.authorizeAllResourceActions(
request,
ImmutableList.of(
new ResourceAction(new Resource(dataSource, ResourceType.DATASOURCE), Action.READ),
@@ -663,8 +663,8 @@ public Response killPendingSegments(
authorizerMapper
);
- if (!authResult.isAllowed()) {
- throw new ForbiddenException(authResult.getMessage());
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
if (overlord.isLeader()) {
@@ -678,7 +678,12 @@ public Response killPendingSegments(
.build();
}
catch (Exception e) {
- log.warn(e, "Failed to delete pending segments for datasource[%s] and interval[%s].", dataSource, deleteInterval);
+ log.warn(
+ e,
+ "Failed to delete pending segments for datasource[%s] and interval[%s].",
+ dataSource,
+ deleteInterval
+ );
return Response.status(Status.INTERNAL_SERVER_ERROR)
.entity(ImmutableMap.of("error", e.getMessage()))
.build();
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/security/SupervisorResourceFilter.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/security/SupervisorResourceFilter.java
index c4be66719913..0265d20c1dee 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/security/SupervisorResourceFilter.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/security/SupervisorResourceFilter.java
@@ -30,8 +30,8 @@
import org.apache.druid.indexing.overlord.supervisor.SupervisorSpec;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.server.http.security.AbstractResourceFilter;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -97,14 +97,14 @@ public boolean apply(PathSegment input)
AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR :
AuthorizationUtils.DATASOURCE_WRITE_RA_GENERATOR;
- Access authResult = AuthorizationUtils.authorizeAllResourceActions(
+ AuthorizationResult authResult = AuthorizationUtils.authorizeAllResourceActions(
getReq(),
Iterables.transform(spec.getDataSources(), resourceActionFunction),
getAuthorizerMapper()
);
- if (!authResult.isAllowed()) {
- throw new ForbiddenException(authResult.toString());
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
return request;
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/security/TaskResourceFilter.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/security/TaskResourceFilter.java
index a9f66ce30e72..2d23c443125d 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/security/TaskResourceFilter.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/security/TaskResourceFilter.java
@@ -29,7 +29,7 @@
import org.apache.druid.indexing.overlord.TaskQueryTool;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.server.http.security.AbstractResourceFilter;
-import org.apache.druid.server.security.Access;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -92,14 +92,14 @@ public ContainerRequest filter(ContainerRequest request)
getAction(request)
);
- final Access authResult = AuthorizationUtils.authorizeResourceAction(
+ final AuthorizationResult authResult = AuthorizationUtils.authorizeResourceAction(
getReq(),
resourceAction,
getAuthorizerMapper()
);
- if (!authResult.isAllowed()) {
- throw new ForbiddenException(authResult.toString());
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
return request;
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/sampler/SamplerResource.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/sampler/SamplerResource.java
index 75618ddae42a..9c2bf31a18d5 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/sampler/SamplerResource.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/sampler/SamplerResource.java
@@ -23,9 +23,9 @@
import com.google.inject.Inject;
import org.apache.druid.client.indexing.SamplerResponse;
import org.apache.druid.client.indexing.SamplerSpec;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
import org.apache.druid.server.security.AuthConfig;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -72,14 +72,14 @@ public SamplerResponse post(final SamplerSpec sampler, @Context final HttpServle
resourceActions.addAll(sampler.getInputSourceResources());
}
- Access authResult = AuthorizationUtils.authorizeAllResourceActions(
+ AuthorizationResult authResult = AuthorizationUtils.authorizeAllResourceActions(
req,
resourceActions,
authorizerMapper
);
- if (!authResult.isAllowed()) {
- throw new ForbiddenException(authResult.getMessage());
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
return sampler.sample();
}
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/supervisor/SupervisorResource.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/supervisor/SupervisorResource.java
index 130f617d59d1..3190835c3e67 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/supervisor/SupervisorResource.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/supervisor/SupervisorResource.java
@@ -41,9 +41,9 @@
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.UOE;
import org.apache.druid.segment.incremental.ParseExceptionReport;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
import org.apache.druid.server.security.AuthConfig;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -142,14 +142,14 @@ public Response specPost(final SupervisorSpec spec, @Context final HttpServletRe
.build();
}
- Access authResult = AuthorizationUtils.authorizeAllResourceActions(
+ AuthorizationResult authResult = AuthorizationUtils.authorizeAllResourceActions(
req,
resourceActions,
authorizerMapper
);
- if (!authResult.isAllowed()) {
- throw new ForbiddenException(authResult.toString());
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
manager.createOrUpdateAndStartSupervisor(spec);
@@ -410,13 +410,16 @@ public Response shutdown(@PathParam("id") final String id)
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(SupervisorResourceFilter.class)
- public Response handoffTaskGroups(@PathParam("id") final String id, @Nonnull final HandoffTaskGroupsRequest handoffTaskGroupsRequest)
+ public Response handoffTaskGroups(
+ @PathParam("id") final String id,
+ @Nonnull final HandoffTaskGroupsRequest handoffTaskGroupsRequest
+ )
{
List taskGroupIds = handoffTaskGroupsRequest.getTaskGroupIds();
if (CollectionUtils.isNullOrEmpty(taskGroupIds)) {
return Response.status(Response.Status.BAD_REQUEST)
- .entity(ImmutableMap.of("error", "List of task groups to handoff can't be empty"))
- .build();
+ .entity(ImmutableMap.of("error", "List of task groups to handoff can't be empty"))
+ .build();
}
return asLeaderWithSupervisorManager(
@@ -426,14 +429,20 @@ public Response handoffTaskGroups(@PathParam("id") final String id, @Nonnull fin
return Response.ok().build();
} else {
return Response.status(Response.Status.NOT_FOUND)
- .entity(ImmutableMap.of("error", StringUtils.format("Supervisor was not found [%s]", id)))
- .build();
+ .entity(ImmutableMap.of("error", StringUtils.format("Supervisor was not found [%s]", id)))
+ .build();
}
}
catch (NotImplementedException e) {
return Response.status(Response.Status.BAD_REQUEST)
- .entity(ImmutableMap.of("error", StringUtils.format("Supervisor [%s] does not support early handoff", id)))
- .build();
+ .entity(ImmutableMap.of(
+ "error",
+ StringUtils.format(
+ "Supervisor [%s] does not support early handoff",
+ id
+ )
+ ))
+ .build();
}
}
);
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java
index fddd8f8e4536..b593b841ed44 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java
@@ -89,8 +89,8 @@
import org.apache.druid.segment.realtime.appenderator.SegmentsAndCommitMetadata;
import org.apache.druid.segment.realtime.appenderator.StreamAppenderator;
import org.apache.druid.segment.realtime.appenderator.StreamAppenderatorDriver;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.utils.CollectionUtils;
@@ -144,7 +144,8 @@
* @param Sequence Number Type
*/
@SuppressWarnings("CheckReturnValue")
-public abstract class SeekableStreamIndexTaskRunner implements ChatHandler
+public abstract class SeekableStreamIndexTaskRunner
+ implements ChatHandler
{
private static final String CTX_KEY_LOOKUP_TIER = "lookupTier";
@@ -278,12 +279,11 @@ public SeekableStreamIndexTaskRunner(
rejectionPeriodUpdaterExec = Execs.scheduledSingleThreaded("RejectionPeriodUpdater-Exec--%d");
if (ioConfig.getRefreshRejectionPeriodsInMinutes() != null) {
- rejectionPeriodUpdaterExec
- .scheduleWithFixedDelay(
- this::refreshMinMaxMessageTime,
- ioConfig.getRefreshRejectionPeriodsInMinutes(),
- ioConfig.getRefreshRejectionPeriodsInMinutes(),
- TimeUnit.MINUTES);
+ rejectionPeriodUpdaterExec.scheduleWithFixedDelay(this::refreshMinMaxMessageTime,
+ ioConfig.getRefreshRejectionPeriodsInMinutes(),
+ ioConfig.getRefreshRejectionPeriodsInMinutes(),
+ TimeUnit.MINUTES
+ );
}
resetNextCheckpointTime();
}
@@ -759,10 +759,18 @@ public void onFailure(Throwable t)
if (System.currentTimeMillis() > nextCheckpointTime) {
sequenceToCheckpoint = getLastSequenceMetadata();
- log.info("Next checkpoint time, updating sequenceToCheckpoint, SequenceToCheckpoint: [%s]", sequenceToCheckpoint);
+ log.info(
+ "Next checkpoint time, updating sequenceToCheckpoint, SequenceToCheckpoint: [%s]",
+ sequenceToCheckpoint
+ );
}
if (pushTriggeringAddResult != null) {
- log.info("Hit the row limit updating sequenceToCheckpoint, SequenceToCheckpoint: [%s], rowInSegment: [%s], TotalRows: [%s]", sequenceToCheckpoint, pushTriggeringAddResult.getNumRowsInSegment(), pushTriggeringAddResult.getTotalNumRowsInAppenderator());
+ log.info(
+ "Hit the row limit updating sequenceToCheckpoint, SequenceToCheckpoint: [%s], rowInSegment: [%s], TotalRows: [%s]",
+ sequenceToCheckpoint,
+ pushTriggeringAddResult.getNumRowsInSegment(),
+ pushTriggeringAddResult.getTotalNumRowsInAppenderator()
+ );
}
if (sequenceToCheckpoint != null && stillReading) {
@@ -1128,14 +1136,14 @@ private synchronized void persistSequences() throws IOException
/**
* Return a map of reports for the task.
- *
+ *
A successful task should always have a null errorMsg. Segment availability is inherently confirmed
if the task was successful.
- *
+ *
A failed task should always have a non-null errorMsg. Segment availability is never confirmed if the task
* was not successful.
*
- * @param errorMsg Nullable error message for the task. null if task succeeded.
+ * @param errorMsg Nullable error message for the task. null if task succeeded.
* @param handoffWaitMs Milliseconds waited for segments to be handed off.
* @return Map of reports for the task.
*/
@@ -1446,7 +1454,7 @@ protected void sendResetRequestAndWait(
*
* @return authorization result
*/
- private Access authorizationCheck(final HttpServletRequest req, Action action)
+ private AuthorizationResult authorizationCheck(final HttpServletRequest req, Action action)
{
return IndexTaskUtils.datasourceAuthorizationCheck(req, action, task.getDataSource(), authorizerMapper);
}
@@ -2023,9 +2031,7 @@ private boolean verifyRecordInRange(
*
* @param toolbox task toolbox
* @param checkpointsString the json-serialized checkpoint string
- *
* @return checkpoint
- *
* @throws IOException jsonProcessingException
*/
@Nullable
@@ -2039,7 +2045,6 @@ protected abstract TreeMap> ge
* This is what would become the start offsets of the next reader, if we stopped reading now.
*
* @param sequenceNumber the sequence number that has already been processed
- *
* @return next sequence number to be stored
*/
protected abstract SequenceOffsetType getNextStartOffset(SequenceOffsetType sequenceNumber);
@@ -2049,7 +2054,6 @@ protected abstract TreeMap> ge
*
* @param mapper json objectMapper
* @param object metadata
- *
* @return SeekableStreamEndSequenceNumbers
*/
protected abstract SeekableStreamEndSequenceNumbers deserializePartitionsFromMetadata(
@@ -2063,9 +2067,7 @@ protected abstract SeekableStreamEndSequenceNumbers createDataSourceMetadata(
@@ -2089,7 +2090,6 @@ protected abstract SeekableStreamDataSourceMetadata createSequenceNumber(SequenceOffsetType sequenceNumber);
@@ -2117,7 +2117,11 @@ private void refreshMinMaxMessageTime()
minMessageTime = minMessageTime.plusMinutes(ioConfig.getRefreshRejectionPeriodsInMinutes().intValue());
maxMessageTime = maxMessageTime.plusMinutes(ioConfig.getRefreshRejectionPeriodsInMinutes().intValue());
- log.info(StringUtils.format("Updated min and max messsage times to %s and %s respectively.", minMessageTime, maxMessageTime));
+ log.info(StringUtils.format(
+ "Updated min and max message times to %s and %s respectively.",
+ minMessageTime,
+ maxMessageTime
+ ));
}
public boolean withinMinMaxRecordTime(final InputRow row)
diff --git a/indexing-service/src/test/java/org/apache/druid/indexing/overlord/supervisor/SupervisorResourceTest.java b/indexing-service/src/test/java/org/apache/druid/indexing/overlord/supervisor/SupervisorResourceTest.java
index 1fd7af69e123..f07c6c13ab88 100644
--- a/indexing-service/src/test/java/org/apache/druid/indexing/overlord/supervisor/SupervisorResourceTest.java
+++ b/indexing-service/src/test/java/org/apache/druid/indexing/overlord/supervisor/SupervisorResourceTest.java
@@ -114,13 +114,13 @@ public Authorizer getAuthorizer(String name)
} else {
if (resource.getType().equals(ResourceType.DATASOURCE)) {
if (resource.getName().equals("datasource2")) {
- return new Access(false, "not authorized.");
+ return Access.deny("not authorized.");
} else {
return Access.OK;
}
} else if (resource.getType().equals(ResourceType.EXTERNAL)) {
if (resource.getName().equals("test")) {
- return new Access(false, "not authorized.");
+ return Access.deny("not authorized.");
} else {
return Access.OK;
}
diff --git a/pom.xml b/pom.xml
index c67df8127a03..36618adacdd8 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1647,12 +1647,7 @@
check
-
- org.codehaus.mojo.signature
-
- java18
- 1.0
-
+
diff --git a/processing/src/main/java/org/apache/druid/query/DataSource.java b/processing/src/main/java/org/apache/druid/query/DataSource.java
index 360c339627f9..7c5f52d08fee 100644
--- a/processing/src/main/java/org/apache/druid/query/DataSource.java
+++ b/processing/src/main/java/org/apache/druid/query/DataSource.java
@@ -23,12 +23,16 @@
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import org.apache.druid.query.planning.DataSourceAnalysis;
import org.apache.druid.query.planning.PreJoinableClause;
+import org.apache.druid.query.policy.Policy;
import org.apache.druid.segment.SegmentReference;
import java.util.List;
+import java.util.Map;
+import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
+import java.util.stream.Collectors;
/**
* Represents a source... of data... for a query. Analogous to the "FROM" clause in SQL.
@@ -43,7 +47,8 @@
@JsonSubTypes.Type(value = InlineDataSource.class, name = "inline"),
@JsonSubTypes.Type(value = GlobalTableDataSource.class, name = "globalTable"),
@JsonSubTypes.Type(value = UnnestDataSource.class, name = "unnest"),
- @JsonSubTypes.Type(value = FilteredDataSource.class, name = "filter")
+ @JsonSubTypes.Type(value = FilteredDataSource.class, name = "filter"),
+ @JsonSubTypes.Type(value = RestrictedDataSource.class, name = "restrict")
})
public interface DataSource
{
@@ -88,11 +93,11 @@ public interface DataSource
/**
* Returns true if this datasource can be the base datasource of query processing.
- *
+ *
* Base datasources drive query processing. If the base datasource is {@link TableDataSource}, for example, queries
* are processed in parallel on data servers. If the base datasource is {@link InlineDataSource}, queries are
* processed on the Broker. See {@link DataSourceAnalysis#getBaseDataSource()} for further discussion.
- *
+ *
* Datasources that are *not* concrete must be pre-processed in some way before they can be processed by the main
* query stack. For example, {@link QueryDataSource} must be executed first and substituted with its results.
*
@@ -118,6 +123,29 @@ public interface DataSource
*/
DataSource withUpdatedDataSource(DataSource newSource);
+ /**
+ * Returns the query with an updated datasource based on the policy restrictions on tables.
+ *
+ * If this datasource contains no table, no changes should occur.
+ *
+ * @param policyMap a mapping of table names to policy restrictions. A missing key is different from an empty value:
+ *
+ * a missing key means the table has never been permission checked.
+ * an empty value indicates the table doesn't have any policy restrictions, it has been permission checked.
+ * @return the updated datasource, with restrictions applied in the datasource tree
+ * @throws IllegalStateException when mapping a RestrictedDataSource, unless the table has a NoRestrictionPolicy in
+ * the policyMap (used by druid-internal). Missing policy or adding a
+ * non-NoRestrictionPolicy to RestrictedDataSource would throw.
+ */
+ default DataSource withPolicies(Map> policyMap)
+ {
+ List children = this.getChildren()
+ .stream()
+ .map(child -> child.withPolicies(policyMap))
+ .collect(Collectors.toList());
+ return this.withChildren(children);
+ }
+
/**
* Compute a cache key prefix for a data source. This includes the data sources that participate in the RHS of a
* join as well as any query specific constructs associated with join data source such as base table filter. This key prefix
diff --git a/processing/src/main/java/org/apache/druid/query/JoinDataSource.java b/processing/src/main/java/org/apache/druid/query/JoinDataSource.java
index 2eb459cf8cc3..9662a99ab815 100644
--- a/processing/src/main/java/org/apache/druid/query/JoinDataSource.java
+++ b/processing/src/main/java/org/apache/druid/query/JoinDataSource.java
@@ -551,7 +551,10 @@ private static Triple> flattenJoi
// Will need an instanceof check here
// A future work should look into if the flattenJoin
// can be refactored to omit these instanceof checks
- while (current instanceof JoinDataSource || current instanceof UnnestDataSource || current instanceof FilteredDataSource) {
+ while (current instanceof JoinDataSource
+ || current instanceof UnnestDataSource
+ || current instanceof FilteredDataSource
+ || current instanceof RestrictedDataSource) {
if (current instanceof JoinDataSource) {
final JoinDataSource joinDataSource = (JoinDataSource) current;
current = joinDataSource.getLeft();
@@ -568,6 +571,9 @@ private static Triple> flattenJoi
} else if (current instanceof UnnestDataSource) {
final UnnestDataSource unnestDataSource = (UnnestDataSource) current;
current = unnestDataSource.getBase();
+ } else if (current instanceof RestrictedDataSource) {
+ final RestrictedDataSource restrictedDataSource = (RestrictedDataSource) current;
+ current = restrictedDataSource.getBase();
} else {
final FilteredDataSource filteredDataSource = (FilteredDataSource) current;
current = filteredDataSource.getBase();
diff --git a/processing/src/main/java/org/apache/druid/query/Query.java b/processing/src/main/java/org/apache/druid/query/Query.java
index 3ed1dcbe0ead..3c085a6adede 100644
--- a/processing/src/main/java/org/apache/druid/query/Query.java
+++ b/processing/src/main/java/org/apache/druid/query/Query.java
@@ -126,7 +126,7 @@ default QueryContext context()
/**
* Get context value and cast to ContextType in an unsafe way.
- *
+ *
* For safe conversion, it's recommended to use following methods instead:
*
* {@link QueryContext#getBoolean(String)}
@@ -178,7 +178,7 @@ default HumanReadableBytes getContextHumanReadableBytes(String key, HumanReadabl
* {@link QueryRunnerFactory#mergeRunners(QueryProcessingPool, Iterable)} calls. This is used to combine streams of
* results from different sources; for example, it's used by historicals to combine streams from different segments,
* and it's used by the broker to combine streams from different historicals.
- *
+ *
* Important note: sometimes, this ordering is used in a type-unsafe way to order @{code Result}
* objects. Because of this, implementations should fall back to {@code Ordering.natural()} when they are given an
* object that is not of type T.
@@ -189,7 +189,7 @@ default HumanReadableBytes getContextHumanReadableBytes(String key, HumanReadabl
/**
* Returns a new query, identical to this one, but with a different associated {@link QuerySegmentSpec}.
- *
+ *
* This often changes the behavior of {@link #getRunner(QuerySegmentWalker)}, since most queries inherit that method
* from {@link BaseQuery}, which implements it by calling {@link QuerySegmentSpec#lookup}.
*/
@@ -264,12 +264,12 @@ default VirtualColumns getVirtualColumns()
/**
* Returns the set of columns that this query will need to access out of its datasource.
- *
+ *
* This method does not "look into" what the datasource itself is doing. For example, if a query is built on a
* {@link QueryDataSource}, this method will not return the columns used by that subquery. As another example, if a
* query is built on a {@link JoinDataSource}, this method will not return the columns from the underlying datasources
* that are used by the join condition, unless those columns are also used by this query in other ways.
- *
+ *
* Returns null if the set of required columns cannot be known ahead of time.
*/
@Nullable
diff --git a/processing/src/main/java/org/apache/druid/query/RestrictedDataSource.java b/processing/src/main/java/org/apache/druid/query/RestrictedDataSource.java
new file mode 100644
index 000000000000..f7f91072fc64
--- /dev/null
+++ b/processing/src/main/java/org/apache/druid/query/RestrictedDataSource.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.query;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.collect.ImmutableList;
+import org.apache.druid.java.util.common.IAE;
+import org.apache.druid.java.util.common.ISE;
+import org.apache.druid.query.planning.DataSourceAnalysis;
+import org.apache.druid.query.policy.NoRestrictionPolicy;
+import org.apache.druid.query.policy.Policy;
+import org.apache.druid.segment.RestrictedSegment;
+import org.apache.druid.segment.SegmentReference;
+import org.apache.druid.utils.JvmUtils;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+
+/**
+ * Represents a TableDataSource with policy restriction.
+ *
+ * A RestrictedDataSource means the base TableDataSource has policy imposed. A table without any policy should never be
+ * transformed to a RestrictedDataSource. Druid internal system and admin users would have a {@link NoRestrictionPolicy}.
+ */
+public class RestrictedDataSource implements DataSource
+{
+ private final TableDataSource base;
+
+ private final Policy policy;
+
+ @JsonProperty("base")
+ public TableDataSource getBase()
+ {
+ return base;
+ }
+
+ @JsonProperty("policy")
+ public Policy getPolicy()
+ {
+ return policy;
+ }
+
+ RestrictedDataSource(TableDataSource base, Policy policy)
+ {
+ this.base = base;
+ this.policy = policy;
+ }
+
+ @JsonCreator
+ public static RestrictedDataSource create(
+ @JsonProperty("base") DataSource base,
+ @JsonProperty("policy") Policy policy
+ )
+ {
+ if (!(base instanceof TableDataSource)) {
+ throw new IAE("Expected a TableDataSource, got data source type [%s]", base.getClass());
+ }
+ if (Objects.isNull(policy)) {
+ throw new IAE("Policy can't be null for RestrictedDataSource");
+ }
+ return new RestrictedDataSource((TableDataSource) base, policy);
+ }
+
+ @Override
+ public Set getTableNames()
+ {
+ return base.getTableNames();
+ }
+
+ @Override
+ public List getChildren()
+ {
+ return ImmutableList.of(base);
+ }
+
+ @Override
+ public DataSource withChildren(List children)
+ {
+ if (children.size() != 1) {
+ throw new IAE("Expected [1] child, got [%d]", children.size());
+ }
+
+ return create(children.get(0), policy);
+ }
+
+ @Override
+ public boolean isCacheable(boolean isBroker)
+ {
+ return false;
+ }
+
+ @Override
+ public boolean isGlobal()
+ {
+ return base.isGlobal();
+ }
+
+ @Override
+ public boolean isConcrete()
+ {
+ return base.isConcrete();
+ }
+
+ @Override
+ public Function createSegmentMapFunction(
+ Query query,
+ AtomicLong cpuTimeAccumulator
+ )
+ {
+ return JvmUtils.safeAccumulateThreadCpuTime(
+ cpuTimeAccumulator,
+ () -> base.createSegmentMapFunction(
+ query,
+ cpuTimeAccumulator
+ ).andThen((segment) -> (new RestrictedSegment(segment, policy)))
+ );
+ }
+
+ @Override
+ public DataSource withUpdatedDataSource(DataSource newSource)
+ {
+ return create(newSource, policy);
+ }
+
+ @Override
+ public DataSource withPolicies(Map> policyMap)
+ {
+ if (!policyMap.containsKey(base.getName())) {
+ throw new ISE("Missing policy check result for table [%s]", base.getName());
+ }
+
+ Optional newPolicy = policyMap.getOrDefault(base.getName(), Optional.empty());
+ if (!newPolicy.isPresent()) {
+ throw new ISE(
+ "No restriction found on table [%s], but had policy [%s] before.",
+ base.getName(),
+ policy
+ );
+ }
+ if (!(newPolicy.get() instanceof NoRestrictionPolicy)) {
+ throw new ISE(
+ "Multiple restrictions on table [%s]: policy [%s] and policy [%s]",
+ base.getName(),
+ policy,
+ newPolicy.get()
+ );
+ }
+ // The only happy path is when newPolicy is NoRestrictionPolicy, which means this comes from an authenticated and
+ // authorized druid-internal request.
+ return this;
+ }
+
+ @Override
+ public String toString()
+ {
+ return "RestrictedDataSource{" +
+ "base=" + base +
+ ", policy=" + policy + "}";
+ }
+
+ @Override
+ public byte[] getCacheKey()
+ {
+ return new byte[0];
+ }
+
+ @Override
+ public DataSourceAnalysis getAnalysis()
+ {
+ final DataSource current = this.getBase();
+ return current.getAnalysis();
+ }
+
+ @Override
+ public boolean equals(Object o)
+ {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ RestrictedDataSource that = (RestrictedDataSource) o;
+ return Objects.equals(base, that.base) && Objects.equals(policy, that.policy);
+ }
+
+ @Override
+ public int hashCode()
+ {
+ return Objects.hash(base, policy);
+ }
+}
diff --git a/processing/src/main/java/org/apache/druid/query/TableDataSource.java b/processing/src/main/java/org/apache/druid/query/TableDataSource.java
index fe9cf46e37b9..d735a75928d0 100644
--- a/processing/src/main/java/org/apache/druid/query/TableDataSource.java
+++ b/processing/src/main/java/org/apache/druid/query/TableDataSource.java
@@ -25,11 +25,14 @@
import com.google.common.base.Preconditions;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.query.planning.DataSourceAnalysis;
+import org.apache.druid.query.policy.Policy;
import org.apache.druid.segment.SegmentReference;
import java.util.Collections;
import java.util.List;
+import java.util.Map;
import java.util.Objects;
+import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
@@ -112,6 +115,17 @@ public DataSource withUpdatedDataSource(DataSource newSource)
return newSource;
}
+ @Override
+ public DataSource withPolicies(Map> policyMap)
+ {
+ Optional policy = policyMap.getOrDefault(name, Optional.empty());
+ if (!policy.isPresent()) {
+ // Skip adding restriction on table if there's no policy restriction found.
+ return this;
+ }
+ return RestrictedDataSource.create(this, policy.get());
+ }
+
@Override
public byte[] getCacheKey()
{
diff --git a/processing/src/main/java/org/apache/druid/query/filter/NotDimFilter.java b/processing/src/main/java/org/apache/druid/query/filter/NotDimFilter.java
index 67f96221d3d1..4619754f880e 100644
--- a/processing/src/main/java/org/apache/druid/query/filter/NotDimFilter.java
+++ b/processing/src/main/java/org/apache/druid/query/filter/NotDimFilter.java
@@ -39,7 +39,6 @@ public static NotDimFilter of(DimFilter field)
return new NotDimFilter(field);
}
-
private final DimFilter field;
@JsonCreator
diff --git a/processing/src/main/java/org/apache/druid/query/metadata/metadata/SegmentMetadataQuery.java b/processing/src/main/java/org/apache/druid/query/metadata/metadata/SegmentMetadataQuery.java
index f2d434bab8a5..e0e5f81f45f3 100644
--- a/processing/src/main/java/org/apache/druid/query/metadata/metadata/SegmentMetadataQuery.java
+++ b/processing/src/main/java/org/apache/druid/query/metadata/metadata/SegmentMetadataQuery.java
@@ -249,14 +249,14 @@ public List getIntervals()
public String toString()
{
return "SegmentMetadataQuery{" +
- "dataSource='" + getDataSource() + '\'' +
- ", querySegmentSpec=" + getQuerySegmentSpec() +
- ", toInclude=" + toInclude +
- ", merge=" + merge +
- ", usingDefaultInterval=" + usingDefaultInterval +
- ", analysisTypes=" + analysisTypes +
- ", aggregatorMergeStrategy=" + aggregatorMergeStrategy +
- '}';
+ "dataSource='" + getDataSource() + '\'' +
+ ", querySegmentSpec=" + getQuerySegmentSpec() +
+ ", toInclude=" + toInclude +
+ ", merge=" + merge +
+ ", usingDefaultInterval=" + usingDefaultInterval +
+ ", analysisTypes=" + analysisTypes +
+ ", aggregatorMergeStrategy=" + aggregatorMergeStrategy +
+ '}';
}
@Override
@@ -273,10 +273,10 @@ public boolean equals(Object o)
}
SegmentMetadataQuery that = (SegmentMetadataQuery) o;
return merge == that.merge &&
- usingDefaultInterval == that.usingDefaultInterval &&
- Objects.equals(toInclude, that.toInclude) &&
- Objects.equals(analysisTypes, that.analysisTypes) &&
- Objects.equals(aggregatorMergeStrategy, that.aggregatorMergeStrategy);
+ usingDefaultInterval == that.usingDefaultInterval &&
+ Objects.equals(toInclude, that.toInclude) &&
+ Objects.equals(analysisTypes, that.analysisTypes) &&
+ Objects.equals(aggregatorMergeStrategy, that.aggregatorMergeStrategy);
}
@Override
diff --git a/processing/src/main/java/org/apache/druid/query/policy/NoRestrictionPolicy.java b/processing/src/main/java/org/apache/druid/query/policy/NoRestrictionPolicy.java
new file mode 100644
index 000000000000..5753941a15bf
--- /dev/null
+++ b/processing/src/main/java/org/apache/druid/query/policy/NoRestrictionPolicy.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.query.policy;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import org.apache.druid.segment.CursorBuildSpec;
+
+/**
+ * Represents a special kind of policy restriction, indicating that this table is restricted, but doesn't impose any restriction
+ * on the user.
+ */
+public class NoRestrictionPolicy implements Policy
+{
+ NoRestrictionPolicy()
+ {
+ }
+
+ @JsonCreator
+ public static NoRestrictionPolicy instance()
+ {
+ return new NoRestrictionPolicy();
+ }
+
+ @Override
+ public CursorBuildSpec visit(CursorBuildSpec spec)
+ {
+ return spec;
+ }
+
+ @Override
+ public String toString()
+ {
+ return "NO_RESTRICTION";
+ }
+
+ @Override
+ public boolean equals(Object o)
+ {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public int hashCode()
+ {
+ return 0;
+ }
+}
diff --git a/processing/src/main/java/org/apache/druid/query/policy/Policy.java b/processing/src/main/java/org/apache/druid/query/policy/Policy.java
new file mode 100644
index 000000000000..11a1aff4fbb8
--- /dev/null
+++ b/processing/src/main/java/org/apache/druid/query/policy/Policy.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.query.policy;
+
+import com.fasterxml.jackson.annotation.JsonSubTypes;
+import com.fasterxml.jackson.annotation.JsonTypeInfo;
+import org.apache.druid.guice.annotations.UnstableApi;
+import org.apache.druid.segment.CursorBuildSpec;
+
+/**
+ * Extensible interface for a granular-level (e.x. row filter) restriction on read-table access. Implementations must be
+ * Jackson-serializable.
+ */
+@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
+@JsonSubTypes({
+ @JsonSubTypes.Type(value = RowFilterPolicy.class, name = "row"),
+ @JsonSubTypes.Type(value = NoRestrictionPolicy.class, name = "noRestriction")
+})
+@UnstableApi
+public interface Policy
+{
+ /**
+ * Apply this policy to a {@link CursorBuildSpec} to seamlessly enforce policies for cursor-based queries. The
+ * application must encapsulate 100% of the requirements of this policy.
+ */
+ CursorBuildSpec visit(CursorBuildSpec spec);
+
+}
diff --git a/processing/src/main/java/org/apache/druid/query/policy/RowFilterPolicy.java b/processing/src/main/java/org/apache/druid/query/policy/RowFilterPolicy.java
new file mode 100644
index 000000000000..97620381344a
--- /dev/null
+++ b/processing/src/main/java/org/apache/druid/query/policy/RowFilterPolicy.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.query.policy;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.base.Preconditions;
+import org.apache.druid.query.filter.DimFilter;
+import org.apache.druid.query.filter.Filter;
+import org.apache.druid.segment.CursorBuildSpec;
+import org.apache.druid.segment.filter.Filters;
+
+import javax.annotation.Nonnull;
+import java.util.Arrays;
+import java.util.Objects;
+
+/**
+ * Represents a basic row filter policy restriction.
+ */
+public class RowFilterPolicy implements Policy
+{
+ private final DimFilter rowFilter;
+
+ @JsonCreator
+ RowFilterPolicy(@Nonnull @JsonProperty("rowFilter") DimFilter rowFilter)
+ {
+ this.rowFilter = Preconditions.checkNotNull(rowFilter, "rowFilter can't be null");
+ }
+
+ public static RowFilterPolicy from(@Nonnull DimFilter rowFilter)
+ {
+ return new RowFilterPolicy(rowFilter);
+ }
+
+ @JsonProperty
+ public DimFilter getRowFilter()
+ {
+ return rowFilter;
+ }
+
+ @Override
+ public CursorBuildSpec visit(CursorBuildSpec spec)
+ {
+ CursorBuildSpec.CursorBuildSpecBuilder builder = CursorBuildSpec.builder(spec);
+ final Filter filter = spec.getFilter();
+ final Filter policyFilter = this.rowFilter.toFilter();
+
+ builder.setFilter(Filters.and(Arrays.asList(policyFilter, filter)));
+ return builder.build();
+ }
+
+ @Override
+ public String toString()
+ {
+ return "RowFilterPolicy{" + "rowFilter=" + rowFilter + '}';
+ }
+
+ @Override
+ public boolean equals(Object o)
+ {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ RowFilterPolicy that = (RowFilterPolicy) o;
+ return Objects.equals(rowFilter, that.rowFilter);
+ }
+
+ @Override
+ public int hashCode()
+ {
+ return Objects.hash(rowFilter);
+ }
+
+}
diff --git a/processing/src/main/java/org/apache/druid/segment/BypassRestrictedSegment.java b/processing/src/main/java/org/apache/druid/segment/BypassRestrictedSegment.java
new file mode 100644
index 000000000000..5ca7e6314513
--- /dev/null
+++ b/processing/src/main/java/org/apache/druid/segment/BypassRestrictedSegment.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.segment;
+
+import org.apache.druid.query.policy.Policy;
+
+/**
+ * A {@link SegmentReference} wrapper with a {@link Policy} restriction that is not applied. Instead, it relies on the
+ * caller to apply the policy.
+ *
+ * This class is useful when a query engine needs direct access to interfaces that cannot have policies applied
+ * transparently. For example, {@link RestrictedSegment} returns null for {@link #asQueryableIndex} because it cannot
+ * apply policies transparently to a {@link QueryableIndex}. To use one, a query engine needs to obtain a
+ * {@link BypassRestrictedSegment} and apply the policies itself.
+ */
+class BypassRestrictedSegment extends WrappedSegmentReference
+{
+ protected final Policy policy;
+
+ public BypassRestrictedSegment(
+ SegmentReference delegate,
+ Policy policy
+ )
+ {
+ super(delegate);
+ this.policy = policy;
+ }
+
+ public Policy getPolicy()
+ {
+ return policy;
+ }
+
+ @Override
+ public CursorFactory asCursorFactory()
+ {
+ return delegate.asCursorFactory();
+ }
+
+ @Override
+ public QueryableIndex asQueryableIndex()
+ {
+ return delegate.asQueryableIndex();
+ }
+
+ @Override
+ public <T> T as(Class<T> clazz)
+ {
+ return delegate.as(clazz);
+ }
+}
diff --git a/processing/src/main/java/org/apache/druid/segment/RestrictedCursorFactory.java b/processing/src/main/java/org/apache/druid/segment/RestrictedCursorFactory.java
new file mode 100644
index 000000000000..de1d99e573c8
--- /dev/null
+++ b/processing/src/main/java/org/apache/druid/segment/RestrictedCursorFactory.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.segment;
+
+import org.apache.druid.query.policy.Policy;
+import org.apache.druid.segment.column.ColumnCapabilities;
+import org.apache.druid.segment.column.RowSignature;
+
+import javax.annotation.Nullable;
+
+/**
+ * A factory class for creating {@code Cursor} instances with strict adherence to {@link Policy} restrictions. Created
+ * by {@link RestrictedSegment#asCursorFactory()}, and applies policies transparently.
+ *
+ * The {@code CursorFactory} simplifies the process of initializing and retrieving {@code Cursor} objects while ensuring
+ * that any cursor created complies with the {@link Policy} restrictions.
+ *
+ * Policy enforcement in {@link #makeCursorHolder}:
+ *
+ * Row-level restrictions are enforced by adding filters to {@link CursorBuildSpec}, which is then passed to
+ * delegate for execution. This ensures that only relevant data are accessible by the client.
+ *
+ *
+ */
+public class RestrictedCursorFactory implements CursorFactory
+{
+ private final CursorFactory delegate;
+ private final Policy policy;
+
+ public RestrictedCursorFactory(
+ CursorFactory delegate,
+ Policy policy
+ )
+ {
+ this.delegate = delegate;
+ this.policy = policy;
+ }
+
+ @Override
+ public CursorHolder makeCursorHolder(CursorBuildSpec spec)
+ {
+ return delegate.makeCursorHolder(policy.visit(spec));
+ }
+
+ @Override
+ public RowSignature getRowSignature()
+ {
+ return delegate.getRowSignature();
+ }
+
+ @Nullable
+ @Override
+ public ColumnCapabilities getColumnCapabilities(String column)
+ {
+ return delegate.getColumnCapabilities(column);
+ }
+}
diff --git a/processing/src/main/java/org/apache/druid/segment/RestrictedSegment.java b/processing/src/main/java/org/apache/druid/segment/RestrictedSegment.java
new file mode 100644
index 000000000000..c4a7cb4828e8
--- /dev/null
+++ b/processing/src/main/java/org/apache/druid/segment/RestrictedSegment.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.segment;
+
+import org.apache.druid.query.policy.NoRestrictionPolicy;
+import org.apache.druid.query.policy.Policy;
+import org.apache.druid.timeline.SegmentId;
+import org.joda.time.Interval;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Optional;
+
+/**
+ * A {@link SegmentReference} wrapper with a {@link Policy} restriction that is automatically enforced.
+ * The policy seamlessly governs queries on the wrapped segment, ensuring compliance. For example,
+ * {@link #asCursorFactory()} returns a policy-enforced {@link RestrictedCursorFactory}.
+ *
+ *
+ * Direct access to the policy or the underlying SegmentReference (the delegate) is not allowed.
+ * However, a backdoor is available via {@code as(BypassRestrictedSegment.class)}, allowing access to
+ * a {@link BypassRestrictedSegment} instance, which provides flexibility on policy enforcement.
+ */
+public class RestrictedSegment implements SegmentReference
+{
+ protected final SegmentReference delegate;
+ protected final Policy policy;
+
+ public RestrictedSegment(
+ SegmentReference delegate,
+ Policy policy
+ )
+ {
+ this.delegate = delegate;
+ this.policy = policy;
+ }
+
+ @Override
+ public Optional<Closeable> acquireReferences()
+ {
+ return delegate.acquireReferences();
+ }
+
+ @Override
+ public SegmentId getId()
+ {
+ return delegate.getId();
+ }
+
+ @Override
+ public Interval getDataInterval()
+ {
+ return delegate.getDataInterval();
+ }
+
+ @Override
+ public CursorFactory asCursorFactory()
+ {
+ return new RestrictedCursorFactory(delegate.asCursorFactory(), policy);
+ }
+
+ @Nullable
+ @Override
+ public QueryableIndex asQueryableIndex()
+ {
+ return null;
+ }
+
+ @Nullable
+ @Override
+ public <T> T as(@Nonnull Class<T> clazz)
+ {
+ if (CursorFactory.class.equals(clazz)) {
+ return (T) asCursorFactory();
+ } else if (QueryableIndex.class.equals(clazz)) {
+ return null;
+ } else if (TimeBoundaryInspector.class.equals(clazz)) {
+ return (T) WrappedTimeBoundaryInspector.create(delegate.as(TimeBoundaryInspector.class));
+ } else if (TopNOptimizationInspector.class.equals(clazz)) {
+ return (T) new SimpleTopNOptimizationInspector(policy instanceof NoRestrictionPolicy);
+ } else if (BypassRestrictedSegment.class.equals(clazz)) {
+ // A backdoor solution to get the wrapped segment, effectively bypassing the policy.
+ return (T) new BypassRestrictedSegment(delegate, policy);
+ }
+
+ // Unless we know there's no restriction, it's dangerous to return the implementation of a particular interface.
+ if (policy instanceof NoRestrictionPolicy) {
+ return delegate.as(clazz);
+ }
+ return null;
+ }
+
+ @Override
+ public boolean isTombstone()
+ {
+ return delegate.isTombstone();
+ }
+
+ @Override
+ public void close() throws IOException
+ {
+ delegate.close();
+ }
+
+ @Override
+ public String asString()
+ {
+ return delegate.asString();
+ }
+}
diff --git a/processing/src/main/java/org/apache/druid/segment/Segment.java b/processing/src/main/java/org/apache/druid/segment/Segment.java
index 135f4d556421..14cd1a4da443 100644
--- a/processing/src/main/java/org/apache/druid/segment/Segment.java
+++ b/processing/src/main/java/org/apache/druid/segment/Segment.java
@@ -57,31 +57,31 @@ default StorageAdapter asStorageAdapter()
/**
* Request an implementation of a particular interface.
- *
+ *
* If the passed-in interface is {@link QueryableIndex} or {@link CursorFactory}, then this method behaves
* identically to {@link #asQueryableIndex()} or {@link #asCursorFactory()}. Other interfaces are only
* expected to be requested by callers that have specific knowledge of extra features provided by specific
* segment types. For example, an extension might provide a custom Segment type that can offer both
* StorageAdapter and some new interface. That extension can also offer a Query that uses that new interface.
- *
+ *
* Implementations which accept classes other than {@link QueryableIndex} or {@link CursorFactory} are limited
* to using those classes within the extension. This means that one extension cannot rely on the `Segment.as`
* behavior of another extension.
*
* @param clazz desired interface
* @param <T> desired interface
- *
- * @return instance of clazz, or null if the interface is not supported by this segment
- *
- * @see CursorFactory to make cursors to run queries. Never null.
- * @see QueryableIndex index object, if this is a memory-mapped regular segment.
- * @see IndexedTable table object, if this is a joinable indexed table.
- * @see TimeBoundaryInspector inspector for min/max timestamps, if supported by this segment.
- * @see PhysicalSegmentInspector inspector for physical segment details, if supported by this segment.
- * @see MaxIngestedEventTimeInspector inspector for {@link DataSourceMetadataResultValue#getMaxIngestedEventTime()}
- * @see TopNOptimizationInspector inspector containing information for topN specific optimizations
- * @see CloseableShapeshifter stepping stone to {@link org.apache.druid.query.rowsandcols.RowsAndColumns}.
- *
+ * @return instance of clazz, or null if the interface is not supported by this segment, one of the following:
+ *
+ * {@link CursorFactory}, to make cursors to run queries. Never null.
+ * {@link QueryableIndex}, index object, if this is a memory-mapped regular segment.
+ * {@link IndexedTable}, table object, if this is a joinable indexed table.
+ * {@link TimeBoundaryInspector}, inspector for min/max timestamps, if supported by this segment.
+ * {@link PhysicalSegmentInspector}, inspector for physical segment details, if supported by this segment.
+ * {@link MaxIngestedEventTimeInspector}, inspector for {@link DataSourceMetadataResultValue#getMaxIngestedEventTime()}
+ * {@link TopNOptimizationInspector}, inspector containing information for topN specific optimizations
+ * {@link CloseableShapeshifter}, stepping stone to {@link org.apache.druid.query.rowsandcols.RowsAndColumns}.
+ * {@link BypassRestrictedSegment}, a policy-aware segment, converted from a policy-enforced segment.
+ *
*/
@SuppressWarnings({"unused", "unchecked"})
@Nullable
diff --git a/processing/src/test/java/org/apache/druid/query/DataSourceTest.java b/processing/src/test/java/org/apache/druid/query/DataSourceTest.java
index e7850953a609..cd77f14246fd 100644
--- a/processing/src/test/java/org/apache/druid/query/DataSourceTest.java
+++ b/processing/src/test/java/org/apache/druid/query/DataSourceTest.java
@@ -20,16 +20,23 @@
package org.apache.druid.query;
import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
+import org.apache.druid.java.util.common.ISE;
import org.apache.druid.query.aggregation.LongSumAggregatorFactory;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
+import org.apache.druid.query.filter.NullFilter;
import org.apache.druid.query.groupby.GroupByQuery;
+import org.apache.druid.query.policy.NoRestrictionPolicy;
+import org.apache.druid.query.policy.Policy;
+import org.apache.druid.query.policy.RowFilterPolicy;
import org.apache.druid.segment.TestHelper;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
+import java.util.Optional;
public class DataSourceTest
{
@@ -61,6 +68,20 @@ public void testTableDataSource() throws IOException
Assert.assertEquals(new TableDataSource("somedatasource"), dataSource);
}
+ @Test
+ public void testRestrictedDataSource() throws IOException
+ {
+ DataSource dataSource = JSON_MAPPER.readValue(
+ "{\"type\":\"restrict\",\"base\":{\"type\":\"table\",\"name\":\"somedatasource\"},\"policy\":{\"type\":\"noRestriction\"}}\n",
+ DataSource.class
+ );
+
+ Assert.assertEquals(
+ RestrictedDataSource.create(TableDataSource.create("somedatasource"), NoRestrictionPolicy.instance()),
+ dataSource
+ );
+ }
+
@Test
public void testQueryDataSource() throws IOException
{
@@ -99,4 +120,90 @@ public void testUnionDataSource() throws Exception
final DataSource serde = JSON_MAPPER.readValue(JSON_MAPPER.writeValueAsString(dataSource), DataSource.class);
Assert.assertEquals(dataSource, serde);
}
+
+ @Test
+ public void testMapWithRestriction()
+ {
+ TableDataSource table1 = TableDataSource.create("table1");
+ TableDataSource table2 = TableDataSource.create("table2");
+ TableDataSource table3 = TableDataSource.create("table3");
+ UnionDataSource unionDataSource = new UnionDataSource(Lists.newArrayList(table1, table2, table3));
+ ImmutableMap<String, Optional<Policy>> restrictions = ImmutableMap.of(
+ "table1",
+ Optional.of(NoRestrictionPolicy.instance()),
+ "table2",
+ Optional.of(NoRestrictionPolicy.instance()),
+ "table3",
+ Optional.of(RowFilterPolicy.from(new NullFilter(
+ "some-column",
+ null
+ )))
+ );
+
+ Assert.assertEquals(
+ unionDataSource.withPolicies(restrictions),
+ new UnionDataSource(Lists.newArrayList(
+ RestrictedDataSource.create(
+ table1,
+ NoRestrictionPolicy.instance()
+ ),
+ RestrictedDataSource.create(
+ table2,
+ NoRestrictionPolicy.instance()
+ ),
+ RestrictedDataSource.create(
+ table3,
+ RowFilterPolicy.from(new NullFilter(
+ "some-column",
+ null
+ ))
+ )
+ ))
+ );
+ }
+
+ @Test
+ public void testMapWithRestriction_onRestrictedDataSource_fromDruidSystem()
+ {
+ RestrictedDataSource restrictedDataSource = RestrictedDataSource.create(
+ TableDataSource.create("table1"),
+ RowFilterPolicy.from(new NullFilter("some-column", null))
+ );
+ // The druid-system should get a NO_RESTRICTION policy attached on a table.
+ ImmutableMap<String, Optional<Policy>> noRestrictionPolicy = ImmutableMap.of(
+ "table1",
+ Optional.of(NoRestrictionPolicy.instance())
+ );
+
+ Assert.assertEquals(restrictedDataSource, restrictedDataSource.withPolicies(noRestrictionPolicy));
+ }
+
+ @Test
+ public void testMapWithRestriction_onRestrictedDataSource_alwaysThrows()
+ {
+ RestrictedDataSource restrictedDataSource = RestrictedDataSource.create(
+ TableDataSource.create("table1"),
+ RowFilterPolicy.from(new NullFilter("random-column", null))
+ );
+ ImmutableMap<String, Optional<Policy>> anotherRestrictions = ImmutableMap.of(
+ "table1",
+ Optional.of(RowFilterPolicy.from(new NullFilter("some-column", null)))
+ );
+ ImmutableMap<String, Optional<Policy>> noPolicyFound = ImmutableMap.of("table1", Optional.empty());
+ ImmutableMap<String, Optional<Policy>> policyWasNotChecked = ImmutableMap.of();
+
+ ISE e = Assert.assertThrows(ISE.class, () -> restrictedDataSource.withPolicies(anotherRestrictions));
+ Assert.assertEquals(
+ "Multiple restrictions on table [table1]: policy [RowFilterPolicy{rowFilter=random-column IS NULL}] and policy [RowFilterPolicy{rowFilter=some-column IS NULL}]",
+ e.getMessage()
+ );
+
+ ISE e2 = Assert.assertThrows(ISE.class, () -> restrictedDataSource.withPolicies(noPolicyFound));
+ Assert.assertEquals(
+ "No restriction found on table [table1], but had policy [RowFilterPolicy{rowFilter=random-column IS NULL}] before.",
+ e2.getMessage()
+ );
+ ISE e3 = Assert.assertThrows(ISE.class, () -> restrictedDataSource.withPolicies(policyWasNotChecked));
+ Assert.assertEquals("Missing policy check result for table [table1]", e3.getMessage());
+ }
}
diff --git a/processing/src/test/java/org/apache/druid/query/JoinDataSourceTest.java b/processing/src/test/java/org/apache/druid/query/JoinDataSourceTest.java
index 8b2dd464479e..fa57ea196608 100644
--- a/processing/src/test/java/org/apache/druid/query/JoinDataSourceTest.java
+++ b/processing/src/test/java/org/apache/druid/query/JoinDataSourceTest.java
@@ -29,6 +29,7 @@
import org.apache.druid.query.filter.InDimFilter;
import org.apache.druid.query.filter.TrueDimFilter;
import org.apache.druid.query.planning.DataSourceAnalysis;
+import org.apache.druid.query.policy.NoRestrictionPolicy;
import org.apache.druid.segment.TestHelper;
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.join.JoinConditionAnalysis;
@@ -501,6 +502,27 @@ public void testGetAnalysisWithFilteredDS()
Assert.assertEquals("table1", analysis.getBaseDataSource().getTableNames().iterator().next());
}
+ @Test
+ public void testGetAnalysisWithRestrictedDS()
+ {
+ JoinDataSource dataSource = JoinDataSource.create(
+ RestrictedDataSource.create(
+ new TableDataSource("table1"),
+ NoRestrictionPolicy.instance()
+ ),
+ new TableDataSource("table2"),
+ "j.",
+ "x == \"j.x\"",
+ JoinType.LEFT,
+ null,
+ ExprMacroTable.nil(),
+ null,
+ JoinAlgorithm.BROADCAST
+ );
+ DataSourceAnalysis analysis = dataSource.getAnalysis();
+ Assert.assertEquals("table1", analysis.getBaseDataSource().getTableNames().iterator().next());
+ }
+
@Test
public void test_computeJoinDataSourceCacheKey_keyChangesWithBaseFilter()
{
diff --git a/processing/src/test/java/org/apache/druid/query/RestrictedDataSourceTest.java b/processing/src/test/java/org/apache/druid/query/RestrictedDataSourceTest.java
new file mode 100644
index 000000000000..2a51bc60053b
--- /dev/null
+++ b/processing/src/test/java/org/apache/druid/query/RestrictedDataSourceTest.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.query;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.collect.ImmutableList;
+import nl.jqno.equalsverifier.EqualsVerifier;
+import org.apache.druid.java.util.common.IAE;
+import org.apache.druid.query.filter.TrueDimFilter;
+import org.apache.druid.query.policy.NoRestrictionPolicy;
+import org.apache.druid.query.policy.RowFilterPolicy;
+import org.apache.druid.segment.TestHelper;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Collections;
+
+public class RestrictedDataSourceTest
+{
+ private final TableDataSource fooDataSource = new TableDataSource("foo");
+ private final TableDataSource barDataSource = new TableDataSource("bar");
+ private final RestrictedDataSource restrictedFooDataSource = RestrictedDataSource.create(
+ fooDataSource,
+ RowFilterPolicy.from(TrueDimFilter.instance())
+ );
+ private final RestrictedDataSource restrictedBarDataSource = RestrictedDataSource.create(
+ barDataSource,
+ NoRestrictionPolicy.instance()
+ );
+
+ @Test
+ public void test_creation_failWithNullPolicy()
+ {
+ IAE e = Assert.assertThrows(IAE.class, () -> RestrictedDataSource.create(fooDataSource, null));
+ Assert.assertEquals(e.getMessage(), "Policy can't be null for RestrictedDataSource");
+ }
+
+ @Test
+ public void test_getTableNames()
+ {
+ Assert.assertEquals(Collections.singleton("foo"), restrictedFooDataSource.getTableNames());
+ Assert.assertEquals(Collections.singleton("bar"), restrictedBarDataSource.getTableNames());
+ }
+
+ @Test
+ public void test_getChildren()
+ {
+ Assert.assertEquals(Collections.singletonList(fooDataSource), restrictedFooDataSource.getChildren());
+ Assert.assertEquals(Collections.singletonList(barDataSource), restrictedBarDataSource.getChildren());
+ }
+
+ @Test
+ public void test_isCacheable()
+ {
+ Assert.assertFalse(restrictedFooDataSource.isCacheable(true));
+ }
+
+ @Test
+ public void test_isGlobal()
+ {
+ Assert.assertFalse(restrictedFooDataSource.isGlobal());
+ }
+
+ @Test
+ public void test_isConcrete()
+ {
+ Assert.assertTrue(restrictedFooDataSource.isConcrete());
+ }
+
+ @Test
+ public void test_withChildren()
+ {
+ IllegalArgumentException exception = Assert.assertThrows(
+ IllegalArgumentException.class,
+ () -> restrictedFooDataSource.withChildren(Collections.emptyList())
+ );
+ Assert.assertEquals(exception.getMessage(), "Expected [1] child, got [0]");
+
+ IllegalArgumentException exception2 = Assert.assertThrows(
+ IllegalArgumentException.class,
+ () -> restrictedFooDataSource.withChildren(ImmutableList.of(fooDataSource, barDataSource))
+ );
+ Assert.assertEquals(exception2.getMessage(), "Expected [1] child, got [2]");
+
+ RestrictedDataSource newRestrictedDataSource = (RestrictedDataSource) restrictedFooDataSource.withChildren(
+ ImmutableList.of(barDataSource));
+ Assert.assertEquals(newRestrictedDataSource.getBase(), barDataSource);
+ }
+
+ @Test
+ public void test_withUpdatedDataSource()
+ {
+ RestrictedDataSource newRestrictedDataSource = (RestrictedDataSource) restrictedFooDataSource.withUpdatedDataSource(
+ new TableDataSource("bar"));
+ Assert.assertEquals(newRestrictedDataSource.getBase(), barDataSource);
+ }
+
+ @Test
+ public void test_withAnalysis()
+ {
+ Assert.assertEquals(restrictedFooDataSource.getAnalysis(), fooDataSource.getAnalysis());
+ Assert.assertEquals(restrictedBarDataSource.getAnalysis(), barDataSource.getAnalysis());
+ }
+
+ @Test
+ public void test_equals()
+ {
+ EqualsVerifier.forClass(RestrictedDataSource.class).usingGetClass().withNonnullFields("base").verify();
+ }
+
+ @Test
+ public void test_serde_roundTrip() throws Exception
+ {
+ final ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
+ final RestrictedDataSource deserialized = (RestrictedDataSource) jsonMapper.readValue(
+ jsonMapper.writeValueAsString(restrictedFooDataSource),
+ DataSource.class
+ );
+
+ Assert.assertEquals(restrictedFooDataSource, deserialized);
+ }
+
+ @Test
+ public void test_deserialize_fromObject() throws Exception
+ {
+ final ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
+ final RestrictedDataSource deserializedRestrictedDataSource = jsonMapper.readValue(
+ "{\"type\":\"restrict\",\"base\":{\"type\":\"table\",\"name\":\"foo\"},\"policy\":{\"type\":\"noRestriction\"}}",
+ RestrictedDataSource.class
+ );
+
+ Assert.assertEquals(
+ deserializedRestrictedDataSource,
+ RestrictedDataSource.create(fooDataSource, NoRestrictionPolicy.instance())
+ );
+ }
+
+
+ @Test
+ public void test_serialize() throws Exception
+ {
+ final ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
+ final String s = jsonMapper.writeValueAsString(restrictedFooDataSource);
+
+ Assert.assertEquals(
+ "{\"type\":\"restrict\",\"base\":{\"type\":\"table\",\"name\":\"foo\"},\"policy\":{\"type\":\"row\",\"rowFilter\":{\"type\":\"true\"}}}",
+ s
+ );
+ }
+
+ @Test
+ public void testStringRep()
+ {
+ Assert.assertNotEquals(restrictedFooDataSource.toString(), restrictedBarDataSource.toString());
+ }
+}
diff --git a/processing/src/test/java/org/apache/druid/query/policy/NoRestrictionPolicyTest.java b/processing/src/test/java/org/apache/druid/query/policy/NoRestrictionPolicyTest.java
new file mode 100644
index 000000000000..0f20e11b416c
--- /dev/null
+++ b/processing/src/test/java/org/apache/druid/query/policy/NoRestrictionPolicyTest.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.query.policy;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import nl.jqno.equalsverifier.EqualsVerifier;
+import org.apache.druid.segment.CursorBuildSpec;
+import org.apache.druid.segment.TestHelper;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class NoRestrictionPolicyTest
+{
+ @Test
+ public void test_equals()
+ {
+ EqualsVerifier.forClass(NoRestrictionPolicy.class).usingGetClass().verify();
+ }
+
+ @Test
+ public void test_deserialize_fromString() throws Exception
+ {
+ ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
+ Policy deserialized = jsonMapper.readValue("{\"type\":\"noRestriction\"}", Policy.class);
+ Assert.assertEquals(NoRestrictionPolicy.instance(), deserialized);
+ }
+
+ @Test
+ public void test_serde_roundTrip() throws Exception
+ {
+ final NoRestrictionPolicy policy = NoRestrictionPolicy.instance();
+ ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
+ Policy deserialized = jsonMapper.readValue(jsonMapper.writeValueAsString(policy), Policy.class);
+ Assert.assertEquals(policy, deserialized);
+ }
+
+ @Test
+ public void testVisit()
+ {
+ final NoRestrictionPolicy policy = NoRestrictionPolicy.instance();
+ Assert.assertEquals(CursorBuildSpec.FULL_SCAN, policy.visit(CursorBuildSpec.FULL_SCAN));
+ }
+}
diff --git a/processing/src/test/java/org/apache/druid/query/policy/RowFilterPolicyTest.java b/processing/src/test/java/org/apache/druid/query/policy/RowFilterPolicyTest.java
new file mode 100644
index 000000000000..193843dbfcad
--- /dev/null
+++ b/processing/src/test/java/org/apache/druid/query/policy/RowFilterPolicyTest.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.query.policy;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.collect.ImmutableList;
+import nl.jqno.equalsverifier.EqualsVerifier;
+import org.apache.druid.query.filter.DimFilter;
+import org.apache.druid.query.filter.EqualityFilter;
+import org.apache.druid.query.filter.Filter;
+import org.apache.druid.segment.CursorBuildSpec;
+import org.apache.druid.segment.TestHelper;
+import org.apache.druid.segment.column.ColumnType;
+import org.apache.druid.segment.filter.AndFilter;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class RowFilterPolicyTest
+{
+ private static final RowFilterPolicy SIMPLE_ROW_POLICY = RowFilterPolicy.from(new EqualityFilter(
+ "col0",
+ ColumnType.STRING,
+ "val0",
+ null
+ ));
+
+ @Test
+ public void test_equals()
+ {
+ EqualsVerifier.forClass(RowFilterPolicy.class)
+ .usingGetClass()
+ .withNonnullFields(new String[]{"rowFilter"})
+ .verify();
+ }
+
+ @Test
+ public void test_deserialize_fromString() throws Exception
+ {
+ ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
+ Policy deserialized = jsonMapper.readValue(
+ "{\"type\":\"row\",\"rowFilter\":{\"type\":\"equals\",\"column\":\"col0\",\"matchValueType\":\"STRING\",\"matchValue\":\"val0\"}}\n",
+ Policy.class
+ );
+ Assert.assertEquals(SIMPLE_ROW_POLICY, deserialized);
+ }
+
+ @Test
+ public void test_serde_roundTrip() throws Exception
+ {
+ ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
+ Policy deserialized = jsonMapper.readValue(jsonMapper.writeValueAsString(SIMPLE_ROW_POLICY), Policy.class);
+ Assert.assertEquals(SIMPLE_ROW_POLICY, deserialized);
+ }
+
+ @Test
+ public void testVisit()
+ {
+ DimFilter policyFilter = new EqualityFilter("col", ColumnType.STRING, "val", null);
+ final RowFilterPolicy policy = RowFilterPolicy.from(policyFilter);
+
+ Assert.assertEquals(policyFilter, policy.visit(CursorBuildSpec.FULL_SCAN).getFilter());
+ }
+
+ @Test
+ public void testVisit_combineFilters()
+ {
+ Filter filter = new EqualityFilter("col0", ColumnType.STRING, "val0", null);
+ CursorBuildSpec spec = CursorBuildSpec.builder().setFilter(filter).build();
+
+ DimFilter policyFilter = new EqualityFilter("col", ColumnType.STRING, "val", null);
+ final RowFilterPolicy policy = RowFilterPolicy.from(policyFilter);
+
+ Filter expected = new AndFilter(ImmutableList.of(policyFilter.toFilter(), filter));
+ Assert.assertEquals(expected, policy.visit(spec).getFilter());
+ }
+}
diff --git a/server/src/main/java/org/apache/druid/metadata/SQLMetadataStorageActionHandler.java b/server/src/main/java/org/apache/druid/metadata/SQLMetadataStorageActionHandler.java
index eaf2efc82aaf..910e2e1b1633 100644
--- a/server/src/main/java/org/apache/druid/metadata/SQLMetadataStorageActionHandler.java
+++ b/server/src/main/java/org/apache/druid/metadata/SQLMetadataStorageActionHandler.java
@@ -119,8 +119,7 @@ public SQLMetadataStorageActionHandler(
this.connector = connector;
//fully qualified references required below due to identical package names across project modules.
//noinspection UnnecessaryFullyQualifiedName
- this.jsonMapper = jsonMapper.copy().addMixIn(org.apache.druid.metadata.PasswordProvider.class,
- org.apache.druid.metadata.PasswordProviderRedactionMixIn.class);
+ this.jsonMapper = jsonMapper.copy().addMixIn(PasswordProvider.class, PasswordProviderRedactionMixIn.class);
this.entryType = types.getEntryType();
this.statusType = types.getStatusType();
this.lockType = types.getLockType();
diff --git a/server/src/main/java/org/apache/druid/segment/metadata/AbstractSegmentMetadataCache.java b/server/src/main/java/org/apache/druid/segment/metadata/AbstractSegmentMetadataCache.java
index 99d965ec643e..589af8322b44 100644
--- a/server/src/main/java/org/apache/druid/segment/metadata/AbstractSegmentMetadataCache.java
+++ b/server/src/main/java/org/apache/druid/segment/metadata/AbstractSegmentMetadataCache.java
@@ -57,7 +57,7 @@
import org.apache.druid.server.QueryLifecycleFactory;
import org.apache.druid.server.coordination.DruidServerMetadata;
import org.apache.druid.server.coordination.ServerType;
-import org.apache.druid.server.security.Access;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.Escalator;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.SegmentId;
@@ -975,7 +975,8 @@ public Sequence runSegmentMetadataQuery(
return queryLifecycleFactory
.factorize()
- .runSimple(segmentMetadataQuery, escalator.createEscalatedAuthenticationResult(), Access.OK).getResults();
+ .runSimple(segmentMetadataQuery, escalator.createEscalatedAuthenticationResult(), AuthorizationResult.ALLOW_NO_RESTRICTION)
+ .getResults();
}
@VisibleForTesting
diff --git a/server/src/main/java/org/apache/druid/segment/metadata/SegmentMetadataQuerySegmentWalker.java b/server/src/main/java/org/apache/druid/segment/metadata/SegmentMetadataQuerySegmentWalker.java
index 6e721b552ca0..bc66fbf56d69 100644
--- a/server/src/main/java/org/apache/druid/segment/metadata/SegmentMetadataQuerySegmentWalker.java
+++ b/server/src/main/java/org/apache/druid/segment/metadata/SegmentMetadataQuerySegmentWalker.java
@@ -96,7 +96,7 @@ public QueryRunner getQueryRunnerForIntervals(Query query, Iterable QueryRunner getQueryRunnerForSegments(Query query, Iterable specs)
{
- return decorateRunner(query, new QueryRunner<>()
+ return decorateRunner(query, new QueryRunner()
{
@Override
public Sequence run(final QueryPlus queryPlus, final ResponseContext responseContext)
@@ -142,7 +142,11 @@ private Sequence run(
final TimelineLookup timelineLookup = timelineConverter.apply(timeline);
QueryToolChest> toolChest = conglomerate.getToolChest(query);
- Set> segmentAndServers = computeSegmentsToQuery(timelineLookup, query, toolChest);
+ Set> segmentAndServers = computeSegmentsToQuery(
+ timelineLookup,
+ query,
+ toolChest
+ );
queryPlus = queryPlus.withQueryMetrics(toolChest);
queryPlus.getQueryMetrics().reportQueriedSegmentCount(segmentAndServers.size()).emit(emitter);
@@ -181,7 +185,8 @@ Sequence getServerResults(
QueryPlus queryPlus,
ResponseContext responseContext,
long maxQueuedBytesPerServer,
- List segmentDescriptors)
+ List segmentDescriptors
+ )
{
return serverRunner.run(
queryPlus.withQuery(
@@ -207,7 +212,10 @@ private Set> computeSegmentsToQuery
List> timelineObjectHolders =
intervals.stream().flatMap(i -> lookupFn.apply(i).stream()).collect(Collectors.toList());
- final List> serversLookup = toolChest.filterSegments(query, timelineObjectHolders);
+ final List> serversLookup = toolChest.filterSegments(
+ query,
+ timelineObjectHolders
+ );
Set> segmentAndServers = new HashSet<>();
for (TimelineObjectHolder holder : serversLookup) {
@@ -252,8 +260,6 @@ private SortedMap> groupSegmentsByServer(
private Sequence merge(Query query, List> sequencesByInterval)
{
- return Sequences
- .simple(sequencesByInterval)
- .flatMerge(seq -> seq, query.getResultOrdering());
+ return Sequences.simple(sequencesByInterval).flatMerge(seq -> seq, query.getResultOrdering());
}
}
diff --git a/server/src/main/java/org/apache/druid/segment/realtime/ChatHandlers.java b/server/src/main/java/org/apache/druid/segment/realtime/ChatHandlers.java
index 4d971db81e08..b37bf7991a81 100644
--- a/server/src/main/java/org/apache/druid/segment/realtime/ChatHandlers.java
+++ b/server/src/main/java/org/apache/druid/segment/realtime/ChatHandlers.java
@@ -19,8 +19,8 @@
package org.apache.druid.segment.realtime;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -37,7 +37,7 @@ public class ChatHandlers
*
* @return authorization result
*/
- public static Access authorizationCheck(
+ public static AuthorizationResult authorizationCheck(
HttpServletRequest req,
Action action,
String dataSource,
@@ -49,11 +49,11 @@ public static Access authorizationCheck(
action
);
- Access access = AuthorizationUtils.authorizeResourceAction(req, resourceAction, authorizerMapper);
- if (!access.isAllowed()) {
- throw new ForbiddenException(access.toString());
+ AuthorizationResult authResult = AuthorizationUtils.authorizeResourceAction(req, resourceAction, authorizerMapper);
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
- return access;
+ return authResult;
}
}
diff --git a/server/src/main/java/org/apache/druid/server/QueryLifecycle.java b/server/src/main/java/org/apache/druid/server/QueryLifecycle.java
index 292ad958e509..6ffc8aff0bba 100644
--- a/server/src/main/java/org/apache/druid/server/QueryLifecycle.java
+++ b/server/src/main/java/org/apache/druid/server/QueryLifecycle.java
@@ -24,6 +24,7 @@
import com.google.common.base.Strings;
import com.google.common.collect.Iterables;
import org.apache.druid.client.DirectDruidClient;
+import org.apache.druid.error.DruidException;
import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.StringUtils;
@@ -47,12 +48,13 @@
import org.apache.druid.query.QueryTimeoutException;
import org.apache.druid.query.QueryToolChest;
import org.apache.druid.query.context.ResponseContext;
+import org.apache.druid.query.metadata.metadata.SegmentMetadataQuery;
import org.apache.druid.server.QueryResource.ResourceIOReaderWriter;
import org.apache.druid.server.log.RequestLogger;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
import org.apache.druid.server.security.AuthConfig;
import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.Resource;
@@ -79,7 +81,10 @@
* Execution ({@link #execute()}
* Logging ({@link #emitLogsAndMetrics(Throwable, String, long)}
*
+ * Alternatively, if the request is already authenticated and authorized, just call
+ * {@link #runSimple(Query, AuthenticationResult, AuthorizationResult)}.
*
+ *
* This object is not thread-safe.
*/
public class QueryLifecycle
@@ -135,17 +140,19 @@ public QueryLifecycle(
* For callers who have already authorized their query, and where simplicity is desired over flexibility. This method
* does it all in one call. Logs and metrics are emitted when the Sequence is either fully iterated or throws an
* exception.
+ *
+ * The {@code state} transitions from NEW, to INITIALIZED, to AUTHORIZING, to AUTHORIZED, to EXECUTING, then DONE.
*
- * @param query the query
- * @param authenticationResult authentication result indicating identity of the requester
- * @param authorizationResult authorization result of requester
- *
+ * @param query the query
+ * @param authenticationResult authentication result indicating identity of the requester
+ * @param authorizationResult authorization result of requester
* @return results
+ * @throws DruidException if the given authorizationResult denies access, which indicates a bug
*/
public QueryResponse runSimple(
final Query query,
final AuthenticationResult authenticationResult,
- final Access authorizationResult
+ final AuthorizationResult authorizationResult
)
{
initialize(query);
@@ -155,10 +162,6 @@ public QueryResponse runSimple(
final QueryResponse queryResponse;
try {
preAuthorized(authenticationResult, authorizationResult);
- if (!authorizationResult.isAllowed()) {
- throw new ISE(Access.DEFAULT_ERROR_MESSAGE);
- }
-
queryResponse = execute();
results = queryResponse.getResults();
}
@@ -191,8 +194,11 @@ public void after(final boolean isDone, final Throwable thrown)
/**
* Initializes this object to execute a specific query. Does not actually execute the query.
+ *
+ * The {@code state} transitions from NEW, to INITIALIZED.
*
* @param baseQuery the query
+ * @throws DruidException if the current state is not NEW, which indicates a bug
*/
public void initialize(final Query> baseQuery)
{
@@ -204,21 +210,30 @@ public void initialize(final Query> baseQuery)
queryId = UUID.randomUUID().toString();
}
- Map mergedUserAndConfigContext = QueryContexts.override(defaultQueryConfig.getContext(), baseQuery.getContext());
+ Map mergedUserAndConfigContext = QueryContexts.override(
+ defaultQueryConfig.getContext(),
+ baseQuery.getContext()
+ );
mergedUserAndConfigContext.put(BaseQuery.QUERY_ID, queryId);
this.baseQuery = baseQuery.withOverriddenContext(mergedUserAndConfigContext);
this.toolChest = conglomerate.getToolChest(this.baseQuery);
}
/**
- * Authorize the query. Will return an Access object denoting whether the query is authorized or not.
- *
- * @param req HTTP request object of the request. If provided, the auth-related fields in the HTTP request
- * will be automatically set.
+ * Returns {@link AuthorizationResult} based on {@code DRUID_AUTHENTICATION_RESULT} in the given request; the base
+ * query will be transformed with any restrictions carried by the AuthorizationResult.
+ *
+ * The {@code state} transitions from INITIALIZED, to AUTHORIZING, then to AUTHORIZED or UNAUTHORIZED.
+ *
+ * Note this won't throw an exception if authorization denies access or imposes policy restrictions. It is the
+ * caller's responsibility to throw an exception on denial and to impose the policy restrictions.
*
- * @return authorization result
+ * @param req HTTP request to be authorized. The auth-related fields in the HTTP request will be set.
+ * @return authorization result denoting whether the query is authorized or not, along with policy restrictions
+ * @throws IllegalStateException if the request was not authenticated
+ * @throws DruidException if the current state is not INITIALIZED, which indicates a bug
*/
- public Access authorize(HttpServletRequest req)
+ public AuthorizationResult authorize(HttpServletRequest req)
{
transition(State.INITIALIZED, State.AUTHORIZING);
final Iterable resourcesToAuthorize = Iterables.concat(
@@ -242,14 +257,21 @@ public Access authorize(HttpServletRequest req)
}
/**
- * Authorize the query using the authentication result.
- * Will return an Access object denoting whether the query is authorized or not.
+ * Returns {@link AuthorizationResult} based on the given {@link AuthenticationResult}; the base query will be
+ * transformed with any restrictions carried by the AuthorizationResult.
+ *
+ * The {@code state} transitions from INITIALIZED, to AUTHORIZING, then to AUTHORIZED or UNAUTHORIZED.
+ *
+ * Note this won't throw an exception if authorization denies access or imposes policy restrictions. It is the
+ * caller's responsibility to throw an exception on denial and to impose the policy restrictions.
+ *
* This method is to be used by the grpc-query-extension.
*
* @param authenticationResult authentication result indicating identity of the requester
- * @return authorization result of requester
+ * @return authorization result denoting whether the query is authorized or not, along with policy restrictions.
+ * @throws DruidException if the current state is not INITIALIZED, which indicates a bug
*/
- public Access authorize(AuthenticationResult authenticationResult)
+ public AuthorizationResult authorize(AuthenticationResult authenticationResult)
{
transition(State.INITIALIZED, State.AUTHORIZING);
final Iterable resourcesToAuthorize = Iterables.concat(
@@ -272,36 +294,56 @@ public Access authorize(AuthenticationResult authenticationResult)
);
}
- private void preAuthorized(final AuthenticationResult authenticationResult, final Access access)
+ private void preAuthorized(
+ final AuthenticationResult authenticationResult,
+ final AuthorizationResult authorizationResult
+ )
{
- // gotta transition those states, even if we are already authorized
+ // The authorization has already been checked previously (or skipped). This just follows the state transition
+ // process and should not throw an unauthorized error.
transition(State.INITIALIZED, State.AUTHORIZING);
- doAuthorize(authenticationResult, access);
+ doAuthorize(authenticationResult, authorizationResult);
+ if (!state.equals(State.AUTHORIZED)) {
+ throw DruidException.defensive("Unexpected state [%s], expecting [%s].", state, State.AUTHORIZED);
+ }
}
- private Access doAuthorize(final AuthenticationResult authenticationResult, final Access authorizationResult)
+ private AuthorizationResult doAuthorize(
+ final AuthenticationResult authenticationResult,
+ final AuthorizationResult authorizationResult
+ )
{
Preconditions.checkNotNull(authenticationResult, "authenticationResult");
Preconditions.checkNotNull(authorizationResult, "authorizationResult");
- if (!authorizationResult.isAllowed()) {
+ if (!authorizationResult.allowBasicAccess()) {
// Not authorized; go straight to Jail, do not pass Go.
transition(State.AUTHORIZING, State.UNAUTHORIZED);
} else {
transition(State.AUTHORIZING, State.AUTHORIZED);
+ if (this.baseQuery instanceof SegmentMetadataQuery && authorizationResult.allowAccessWithNoRestriction()) {
+ // skip restrictions mapping for SegmentMetadataQuery from user with no restriction
+ } else {
+ this.baseQuery = this.baseQuery.withDataSource(this.baseQuery.getDataSource()
+ .withPolicies(authorizationResult.getPolicyMap()));
+ }
}
this.authenticationResult = authenticationResult;
-
return authorizationResult;
}
/**
- * Execute the query. Can only be called if the query has been authorized. Note that query logs and metrics will
- * not be emitted automatically when the Sequence is fully iterated. It is the caller's responsibility to call
- * {@link #emitLogsAndMetrics(Throwable, String, long)} to emit logs and metrics.
+ * Executes the query.
+ *
+ * Note that query logs and metrics will not be emitted automatically when the Sequence is fully iterated. It
+ * is the caller's responsibility to call {@link #emitLogsAndMetrics(Throwable, String, long)} to emit logs and
+ * metrics.
+ *
+ * The {@code state} transitions from AUTHORIZED, to EXECUTING.
*
* @return result sequence and response context
+ * @throws DruidException if the current state is not AUTHORIZED, which indicates a bug
*/
public QueryResponse execute()
{
@@ -311,14 +353,18 @@ public QueryResponse execute()
@SuppressWarnings("unchecked")
final Sequence res = QueryPlus.wrap((Query) baseQuery)
- .withIdentity(authenticationResult.getIdentity())
- .run(texasRanger, responseContext);
+ .withIdentity(authenticationResult.getIdentity())
+ .run(texasRanger, responseContext);
return new QueryResponse<>(res == null ? Sequences.empty() : res, responseContext);
}
/**
- * Emit logs and metrics for this query.
+ * Emits logs and metrics for this query.
+ *
+ * The {@code state} transitions to DONE. The initial state can be anything, but it likely shouldn't be set to DONE.
+ *
+ * If {@code baseQuery} is null, likely because {@link #initialize(Query)} was never called, do nothing.
*
* @param e exception that occurred while processing this query
* @param remoteAddress remote address, for logging; or null if unknown
@@ -455,7 +501,7 @@ public QueryToolChest getToolChest()
private void transition(final State from, final State to)
{
if (state != from) {
- throw new ISE("Cannot transition from[%s] to[%s].", from, to);
+ throw DruidException.defensive("Cannot transition from[%s] to[%s], current state[%s].", from, to, state);
}
state = to;
diff --git a/server/src/main/java/org/apache/druid/server/QueryResource.java b/server/src/main/java/org/apache/druid/server/QueryResource.java
index 06104000b1ca..93a52abd0643 100644
--- a/server/src/main/java/org/apache/druid/server/QueryResource.java
+++ b/server/src/main/java/org/apache/druid/server/QueryResource.java
@@ -49,8 +49,8 @@
import org.apache.druid.query.context.ResponseContext;
import org.apache.druid.query.context.ResponseContext.Keys;
import org.apache.druid.server.metrics.QueryCountStatsProvider;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.AuthConfig;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -152,14 +152,14 @@ public Response cancelQuery(@PathParam("id") String queryId, @Context final Http
datasources = new TreeSet<>();
}
- Access authResult = AuthorizationUtils.authorizeAllResourceActions(
+ AuthorizationResult authResult = AuthorizationUtils.authorizeAllResourceActions(
req,
Iterables.transform(datasources, AuthorizationUtils.DATASOURCE_WRITE_RA_GENERATOR),
authorizerMapper
);
- if (!authResult.isAllowed()) {
- throw new ForbiddenException(authResult.toString());
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
queryScheduler.cancelQuery(queryId);
@@ -198,7 +198,7 @@ public Response doPost(
log.debug("Got query [%s]", queryLifecycle.getQuery());
}
- final Access authResult;
+ final AuthorizationResult authResult;
try {
authResult = queryLifecycle.authorize(req);
}
@@ -214,8 +214,8 @@ public Response doPost(
return io.getResponseWriter().buildNonOkResponse(qe.getFailType().getExpectedStatus(), qe);
}
- if (!authResult.isAllowed()) {
- throw new ForbiddenException(authResult.toString());
+ if (!authResult.allowBasicAccess()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
final QueryResourceQueryResultPusher pusher = new QueryResourceQueryResultPusher(req, queryLifecycle, io);
diff --git a/server/src/main/java/org/apache/druid/server/http/security/ConfigResourceFilter.java b/server/src/main/java/org/apache/druid/server/http/security/ConfigResourceFilter.java
index 7a45ca1d5bbb..9332c0944779 100644
--- a/server/src/main/java/org/apache/druid/server/http/security/ConfigResourceFilter.java
+++ b/server/src/main/java/org/apache/druid/server/http/security/ConfigResourceFilter.java
@@ -21,7 +21,7 @@
import com.google.inject.Inject;
import com.sun.jersey.spi.container.ContainerRequest;
-import org.apache.druid.server.security.Access;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -56,14 +56,14 @@ public ContainerRequest filter(ContainerRequest request)
getAction(request)
);
- final Access authResult = AuthorizationUtils.authorizeResourceAction(
+ final AuthorizationResult authResult = AuthorizationUtils.authorizeResourceAction(
getReq(),
resourceAction,
getAuthorizerMapper()
);
- if (!authResult.isAllowed()) {
- throw new ForbiddenException(authResult.toString());
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
return request;
diff --git a/server/src/main/java/org/apache/druid/server/http/security/DatasourceResourceFilter.java b/server/src/main/java/org/apache/druid/server/http/security/DatasourceResourceFilter.java
index 2e84e5bd1f38..cbf13cb9cc3b 100644
--- a/server/src/main/java/org/apache/druid/server/http/security/DatasourceResourceFilter.java
+++ b/server/src/main/java/org/apache/druid/server/http/security/DatasourceResourceFilter.java
@@ -23,7 +23,7 @@
import com.google.common.collect.Iterables;
import com.google.inject.Inject;
import com.sun.jersey.spi.container.ContainerRequest;
-import org.apache.druid.server.security.Access;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -57,14 +57,14 @@ public ContainerRequest filter(ContainerRequest request)
getAction(request)
);
- final Access authResult = AuthorizationUtils.authorizeResourceAction(
+ final AuthorizationResult authResult = AuthorizationUtils.authorizeResourceAction(
getReq(),
resourceAction,
getAuthorizerMapper()
);
- if (!authResult.isAllowed()) {
- throw new ForbiddenException(authResult.toString());
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
return request;
diff --git a/server/src/main/java/org/apache/druid/server/http/security/RulesResourceFilter.java b/server/src/main/java/org/apache/druid/server/http/security/RulesResourceFilter.java
index e48c38e0260d..74831d174d71 100644
--- a/server/src/main/java/org/apache/druid/server/http/security/RulesResourceFilter.java
+++ b/server/src/main/java/org/apache/druid/server/http/security/RulesResourceFilter.java
@@ -24,7 +24,7 @@
import com.google.common.collect.Iterables;
import com.google.inject.Inject;
import com.sun.jersey.spi.container.ContainerRequest;
-import org.apache.druid.server.security.Access;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -38,8 +38,8 @@
/**
* Use this ResourceFilter when the datasource information is present after "rules" segment in the request Path
* Here are some example paths where this filter is used -
- * - druid/coordinator/v1/rules/
- * */
+ * - druid/coordinator/v1/rules/
+ */
public class RulesResourceFilter extends AbstractResourceFilter
{
@@ -75,14 +75,14 @@ public boolean apply(PathSegment input)
getAction(request)
);
- final Access authResult = AuthorizationUtils.authorizeResourceAction(
+ final AuthorizationResult authResult = AuthorizationUtils.authorizeResourceAction(
getReq(),
resourceAction,
getAuthorizerMapper()
);
- if (!authResult.isAllowed()) {
- throw new ForbiddenException(authResult.toString());
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
return request;
diff --git a/server/src/main/java/org/apache/druid/server/http/security/StateResourceFilter.java b/server/src/main/java/org/apache/druid/server/http/security/StateResourceFilter.java
index 3a2d0e3bf83c..a2168850ffd5 100644
--- a/server/src/main/java/org/apache/druid/server/http/security/StateResourceFilter.java
+++ b/server/src/main/java/org/apache/druid/server/http/security/StateResourceFilter.java
@@ -21,13 +21,14 @@
import com.google.inject.Inject;
import com.sun.jersey.spi.container.ContainerRequest;
-import org.apache.druid.server.security.Access;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
import org.apache.druid.server.security.Resource;
import org.apache.druid.server.security.ResourceAction;
+
/**
* Use this ResourceFilter at end points where Druid Cluster State is read or written
* Here are some example paths where this filter is used -
@@ -59,14 +60,14 @@ public ContainerRequest filter(ContainerRequest request)
getAction(request)
);
- final Access authResult = AuthorizationUtils.authorizeResourceAction(
+ final AuthorizationResult authResult = AuthorizationUtils.authorizeResourceAction(
getReq(),
resourceAction,
getAuthorizerMapper()
);
- if (!authResult.isAllowed()) {
- throw new ForbiddenException(authResult.toString());
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException(authResult.getErrorMessage());
}
return request;
diff --git a/server/src/main/java/org/apache/druid/server/security/Access.java b/server/src/main/java/org/apache/druid/server/security/Access.java
index 706a78329062..456222c1dac5 100644
--- a/server/src/main/java/org/apache/druid/server/security/Access.java
+++ b/server/src/main/java/org/apache/druid/server/security/Access.java
@@ -21,52 +21,111 @@
import com.google.common.base.Strings;
import org.apache.druid.java.util.common.StringUtils;
+import org.apache.druid.query.policy.Policy;
+import javax.annotation.Nullable;
+import java.util.Optional;
+
+/**
+ * Represents the outcome of verifying permissions to perform an {@link Action} on a {@link Resource}, along with any
+ * policy restrictions.
+ */
public class Access
{
public static final String DEFAULT_ERROR_MESSAGE = "Unauthorized";
+ public static final String DEFAULT_AUTHORIZED_MESSAGE = "Authorized";
- public static final Access OK = new Access(true);
- public static final Access DENIED = new Access(false);
+ public static final Access OK = allow();
+ public static final Access DENIED = deny("");
private final boolean allowed;
private final String message;
+ // A policy restriction on top of table-level read access. It should be empty if there are no policy restrictions
+ // or if access is requested for an action other than reading the table.
+ private final Optional policy; // should this be a list?
+ /**
+ * @deprecated use {@link #allow()} or {@link #deny(String)} instead
+ */
+ @Deprecated
public Access(boolean allowed)
{
- this(allowed, "");
+ this(allowed, "", Optional.empty());
}
+ /**
+ * @deprecated use {@link #allow()} or {@link #deny(String)} instead
+ */
+ @Deprecated
public Access(boolean allowed, String message)
+ {
+ this(allowed, message, Optional.empty());
+ }
+
+ Access(boolean allowed, String message, Optional policy)
{
this.allowed = allowed;
this.message = message;
+ this.policy = policy;
+ }
+
+ /**
+ * Constructs {@link Access} instance with access allowed, with no policy restriction.
+ */
+ public static Access allow()
+ {
+ return new Access(true, "", Optional.empty());
+ }
+
+ /**
+ * Contructs {@link Access} instance with access denied.
+ */
+ public static Access deny(@Nullable String message)
+ {
+ return new Access(false, StringUtils.nullToEmptyNonDruidDataString(message), null);
+ }
+
+ /**
+ * Contructs {@link Access} instance with access allowed, but with policy restriction.
+ */
+ public static Access allowWithRestriction(Policy policy)
+ {
+ return new Access(true, "", Optional.of(policy));
}
+ /**
+ * Returns true if access allowed, ignoring any policy restrictions.
+ */
public boolean isAllowed()
{
return allowed;
}
- public String getMessage()
+ public Optional getPolicy()
{
- return message;
+ return policy;
}
- public String toMessage()
+ public String getMessage()
{
+ StringBuilder stringBuilder = new StringBuilder();
+ stringBuilder.append(allowed ? DEFAULT_AUTHORIZED_MESSAGE : DEFAULT_ERROR_MESSAGE);
if (!Strings.isNullOrEmpty(message)) {
- return toString();
- } else if (allowed) {
- return "Authorized";
- } else {
- return DEFAULT_ERROR_MESSAGE;
+ stringBuilder.append(", ");
+ stringBuilder.append(message);
}
+ if (allowed && policy.isPresent()) {
+ stringBuilder.append(", with restriction [");
+ stringBuilder.append(policy.get());
+ stringBuilder.append("]");
+ }
+ return stringBuilder.toString();
}
@Override
public String toString()
{
- return StringUtils.format("Allowed:%s, Message:%s", allowed, message);
+ return StringUtils.format("Allowed:%s, Message:%s, Policy: %s", allowed, message, policy);
}
+
}
diff --git a/server/src/main/java/org/apache/druid/server/security/AuthConfig.java b/server/src/main/java/org/apache/druid/server/security/AuthConfig.java
index 8413155a8f3c..4ad6325d8e45 100644
--- a/server/src/main/java/org/apache/druid/server/security/AuthConfig.java
+++ b/server/src/main/java/org/apache/druid/server/security/AuthConfig.java
@@ -118,8 +118,8 @@ public AuthConfig(
this.allowUnauthenticatedHttpOptions = allowUnauthenticatedHttpOptions;
this.authorizeQueryContextParams = authorizeQueryContextParams;
this.unsecuredContextKeys = unsecuredContextKeys == null
- ? Collections.emptySet()
- : unsecuredContextKeys;
+ ? Collections.emptySet()
+ : unsecuredContextKeys;
this.securedContextKeys = securedContextKeys;
this.enableInputSourceSecurity = enableInputSourceSecurity;
}
diff --git a/server/src/main/java/org/apache/druid/server/security/AuthorizationResult.java b/server/src/main/java/org/apache/druid/server/security/AuthorizationResult.java
new file mode 100644
index 000000000000..d0e166b4e92a
--- /dev/null
+++ b/server/src/main/java/org/apache/druid/server/security/AuthorizationResult.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.server.security;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import org.apache.druid.error.DruidException;
+import org.apache.druid.query.policy.NoRestrictionPolicy;
+import org.apache.druid.query.policy.Policy;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+
+/**
+ * Represents the outcoming of performing authorization check on required resource accesses on a query or http requests.
+ * It contains:
+ *
+ * a boolean allow or deny access results for checking permissions on a list of resource actions.
+ * a failure message if deny access. It's null when access is allowed.
+ * a map of table name with optional {@link Policy} restriction. An empty value means there's no restriction
+ * enforced on the table.
+ *
+ */
+public class AuthorizationResult
+{
+ /**
+ * Provides access with no restrictions to all resources.This should be limited to Druid internal systems or
+ * superusers, except in cases where granular ACL considerations are not a priority.
+ */
+ public static final AuthorizationResult ALLOW_NO_RESTRICTION = new AuthorizationResult(
+ Permission.ALLOW_NO_RESTRICTION,
+ null,
+ Collections.emptyMap()
+ );
+
+ /**
+ * Provides a default deny access result.
+ */
+ public static final AuthorizationResult DENY = new AuthorizationResult(
+ Permission.DENY,
+ Access.DENIED.getMessage(),
+ Collections.emptyMap()
+ );
+
+ enum Permission
+ {
+ ALLOW_NO_RESTRICTION,
+ ALLOW_WITH_RESTRICTION,
+ DENY
+ }
+
+ private final Permission permission;
+
+ @Nullable
+ private final String failureMessage;
+
+ private final Map> policyRestrictions;
+
+ AuthorizationResult(
+ Permission permission,
+ @Nullable String failureMessage,
+ Map> policyRestrictions
+ )
+ {
+ this.permission = permission;
+ this.failureMessage = failureMessage;
+ this.policyRestrictions = policyRestrictions;
+
+ // sanity check
+ switch (permission) {
+ case DENY:
+ validateFailureMessageIsSet();
+ validatePolicyRestrictionEmpty();
+ return;
+ case ALLOW_WITH_RESTRICTION:
+ validateFailureMessageNull();
+ validatePolicyRestrictionNonEmpty();
+ return;
+ case ALLOW_NO_RESTRICTION:
+ validateFailureMessageNull();
+ validatePolicyRestrictionEmpty();
+ return;
+ default:
+ throw DruidException.defensive("unreachable");
+ }
+ }
+
+ public static AuthorizationResult deny(@Nonnull String failureMessage)
+ {
+ return new AuthorizationResult(Permission.DENY, failureMessage, Collections.emptyMap());
+ }
+
+ public static AuthorizationResult allowWithRestriction(Map> policyRestrictions)
+ {
+ if (policyRestrictions.isEmpty()) {
+ return ALLOW_NO_RESTRICTION;
+ }
+ return new AuthorizationResult(Permission.ALLOW_WITH_RESTRICTION, null, policyRestrictions);
+ }
+
+ /**
+ * Returns true if user has basic access.
+ */
+ public boolean allowBasicAccess()
+ {
+ return Permission.ALLOW_NO_RESTRICTION.equals(permission) || Permission.ALLOW_WITH_RESTRICTION.equals(permission);
+ }
+
+ /**
+ * Returns true if user has all required permission, and the policy restrictions indicates one of the following:
+ * no policy found
+ * the user has a no-restriction policy
+ */
+ public boolean allowAccessWithNoRestriction()
+ {
+ return Permission.ALLOW_NO_RESTRICTION.equals(permission) || (Permission.ALLOW_WITH_RESTRICTION.equals(permission)
+ && policyRestrictions.values()
+ .stream()
+ .flatMap(Optional::stream)
+ .allMatch(p -> (p instanceof NoRestrictionPolicy)));
+ }
+
+ /**
+ * Returns an error string if the AuthorizationResult doesn't permit all requried access.
+ */
+ public String getErrorMessage()
+ {
+ switch (permission) {
+ case DENY:
+ return Objects.requireNonNull(failureMessage);
+ case ALLOW_WITH_RESTRICTION:
+ if (!allowAccessWithNoRestriction()) {
+ return Access.DEFAULT_ERROR_MESSAGE;
+ }
+ default:
+ throw DruidException.defensive("unreachable");
+ }
+ }
+
+ /**
+ * Returns a map of table and {@link Policy} restriction on the table. Empty value means the table doesn't have any
+ * restriction.
+ */
+ public Map> getPolicyMap()
+ {
+ return policyRestrictions;
+ }
+
+ @Override
+ public boolean equals(Object o)
+ {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ AuthorizationResult that = (AuthorizationResult) o;
+ return Objects.equals(permission, that.permission) &&
+ Objects.equals(failureMessage, that.failureMessage) &&
+ Objects.equals(policyRestrictions, that.policyRestrictions);
+ }
+
+ @Override
+ public int hashCode()
+ {
+ return Objects.hash(permission, failureMessage, policyRestrictions);
+ }
+
+ @Override
+ public String toString()
+ {
+ return "AuthorizationResult [permission="
+ + permission
+ + ", failureMessage="
+ + failureMessage
+ + ", policyRestrictions="
+ + policyRestrictions
+ + "]";
+ }
+
+ private void validateFailureMessageIsSet()
+ {
+ Preconditions.checkArgument(
+ !Strings.isNullOrEmpty(failureMessage),
+ "Failure message must be set for permission[%s]",
+ permission
+ );
+ }
+
+ private void validateFailureMessageNull()
+ {
+ Preconditions.checkArgument(
+ failureMessage == null,
+ "Failure message must be null for permission[%s]",
+ permission
+ );
+ }
+
+ private void validatePolicyRestrictionEmpty()
+ {
+ Preconditions.checkArgument(
+ policyRestrictions.isEmpty(),
+ "Policy restrictions not allowed for permission[%s]",
+ permission
+ );
+ }
+
+ private void validatePolicyRestrictionNonEmpty()
+ {
+ Preconditions.checkArgument(
+ !policyRestrictions.isEmpty(),
+ "Policy restrictions must exist for permission[%s]",
+ permission
+ );
+ }
+}
diff --git a/server/src/main/java/org/apache/druid/server/security/AuthorizationUtils.java b/server/src/main/java/org/apache/druid/server/security/AuthorizationUtils.java
index 431819da8a42..28543b1a5c86 100644
--- a/server/src/main/java/org/apache/druid/server/security/AuthorizationUtils.java
+++ b/server/src/main/java/org/apache/druid/server/security/AuthorizationUtils.java
@@ -20,6 +20,7 @@
package org.apache.druid.server.security;
import com.google.common.base.Function;
+import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.druid.audit.AuditInfo;
@@ -27,6 +28,7 @@
import org.apache.druid.audit.RequestInfo;
import org.apache.druid.error.DruidException;
import org.apache.druid.java.util.common.ISE;
+import org.apache.druid.query.policy.Policy;
import javax.servlet.http.HttpServletRequest;
import java.util.ArrayList;
@@ -35,6 +37,7 @@
import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Optional;
import java.util.Set;
/**
@@ -42,22 +45,22 @@
*/
public class AuthorizationUtils
{
+ static final ImmutableSet RESTRICTION_APPLICABLE_RESOURCE_TYPES = ImmutableSet.of(
+ ResourceType.DATASOURCE
+ );
+
/**
- * Check a resource-action using the authorization fields from the request.
- *
- * Otherwise, if the resource-actions is authorized, return ACCESS_OK.
- *
- * This function will set the DRUID_AUTHORIZATION_CHECKED attribute in the request.
- *
- * If this attribute is already set when this function is called, an exception is thrown.
+ * Performs authorization check on a single resource-action based on the authentication fields from the request.
+ *
+ * This function will set the DRUID_AUTHORIZATION_CHECKED attribute in the request. If this attribute is already set
+ * when this function is called, an exception is thrown.
*
* @param request HTTP request to be authorized
* @param resourceAction A resource identifier and the action to be taken the resource.
* @param authorizerMapper The singleton AuthorizerMapper instance
- *
- * @return ACCESS_OK or the failed Access object returned by the Authorizer that checked the request.
+ * @return AuthorizationResult containing allow/deny access to the resource action, along with policy restrictions.
*/
- public static Access authorizeResourceAction(
+ public static AuthorizationResult authorizeResourceAction(
final HttpServletRequest request,
final ResourceAction resourceAction,
final AuthorizerMapper authorizerMapper
@@ -74,9 +77,7 @@ public static Access authorizeResourceAction(
* Returns the authentication information for a request.
*
* @param request http request
- *
* @return authentication result
- *
* @throws IllegalStateException if the request was not authenticated
*/
public static AuthenticationResult authenticationResultFromRequest(final HttpServletRequest request)
@@ -145,19 +146,15 @@ public static RequestInfo buildRequestInfo(String service, HttpServletRequest re
}
/**
- * Check a list of resource-actions to be performed by the identity represented by authenticationResult.
- *
- * If one of the resource-actions fails the authorization check, this method returns the failed
- * Access object from the check.
- *
- * Otherwise, return ACCESS_OK if all resource-actions were successfully authorized.
+ * Performs authorization check on a list of resource-actions based on the authenticationResult.
+ *
+ * If one of the resource-actions denys access, returns deny access immediately.
*
* @param authenticationResult Authentication result representing identity of requester
* @param resourceActions An Iterable of resource-actions to authorize
- *
- * @return ACCESS_OK or the Access object from the first failed check
+ * @return AuthorizationResult containing allow/deny access to the resource actions, along with policy restrictions.
*/
- public static Access authorizeAllResourceActions(
+ public static AuthorizationResult authorizeAllResourceActions(
final AuthenticationResult authenticationResult,
final Iterable resourceActions,
final AuthorizerMapper authorizerMapper
@@ -170,6 +167,7 @@ public static Access authorizeAllResourceActions(
// this method returns on first failure, so only successful Access results are kept in the cache
final Set resultCache = new HashSet<>();
+ final Map> policyFilters = new HashMap<>();
for (ResourceAction resourceAction : resourceActions) {
if (resultCache.contains(resourceAction)) {
@@ -181,54 +179,62 @@ public static Access authorizeAllResourceActions(
resourceAction.getAction()
);
if (!access.isAllowed()) {
- return access;
+ return AuthorizationResult.deny(access.getMessage());
} else {
resultCache.add(resourceAction);
+ if (resourceAction.getAction().equals(Action.READ)
+ && RESTRICTION_APPLICABLE_RESOURCE_TYPES.contains(resourceAction.getResource().getType())) {
+ // For every table read, we check on the policy returned from authorizer and add it to the map.
+ policyFilters.put(resourceAction.getResource().getName(), access.getPolicy());
+ } else if (access.getPolicy().isPresent()) {
+ throw DruidException.defensive(
+ "Policy should only present when reading a table, but was present for a different kind of resource action [%s]",
+ resourceAction
+ );
+ } else {
+ // Not a read table action, access doesn't have a filter, do nothing.
+ }
}
}
- return Access.OK;
+ return AuthorizationResult.allowWithRestriction(policyFilters);
}
+
/**
- * Check a list of resource-actions to be performed as a result of an HTTP request.
- *
- * If one of the resource-actions fails the authorization check, this method returns the failed
- * Access object from the check.
- *
- * Otherwise, return ACCESS_OK if all resource-actions were successfully authorized.
- *
- * This function will set the DRUID_AUTHORIZATION_CHECKED attribute in the request.
- *
- * If this attribute is already set when this function is called, an exception is thrown.
+ * Performs authorization check on a list of resource-actions based on the authentication fields from the request.
+ *
+ * If one of the resource-actions denys access, returns deny access immediately.
+ *
+ * This function will set the DRUID_AUTHORIZATION_CHECKED attribute in the request. If this attribute is already set
+ * when this function is called, an exception is thrown.
*
* @param request HTTP request to be authorized
* @param resourceActions An Iterable of resource-actions to authorize
- *
- * @return ACCESS_OK or the Access object from the first failed check
+ * @return AuthorizationResult containing allow/deny access to the resource actions, along with policy restrictions.
*/
- public static Access authorizeAllResourceActions(
+ public static AuthorizationResult authorizeAllResourceActions(
final HttpServletRequest request,
final Iterable resourceActions,
final AuthorizerMapper authorizerMapper
)
{
if (request.getAttribute(AuthConfig.DRUID_ALLOW_UNSECURED_PATH) != null) {
- return Access.OK;
+ return AuthorizationResult.ALLOW_NO_RESTRICTION;
}
if (request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED) != null) {
throw new ISE("Request already had authorization check.");
}
- Access access = authorizeAllResourceActions(
+ AuthorizationResult authResult = authorizeAllResourceActions(
authenticationResultFromRequest(request),
resourceActions,
authorizerMapper
);
- request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, access.isAllowed());
- return access;
+ request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, authResult.allowBasicAccess());
+ return authResult;
}
/**
@@ -249,28 +255,22 @@ public static void setRequestAuthorizationAttributeIfNeeded(final HttpServletReq
}
/**
- * Filter a collection of resources by applying the resourceActionGenerator to each resource, return an iterable
- * containing the filtered resources.
- *
- * The resourceActionGenerator returns an Iterable for each resource.
- *
- * If every resource-action in the iterable is authorized, the resource will be added to the filtered resources.
- *
- * If there is an authorization failure for one of the resource-actions, the resource will not be
- * added to the returned filtered resources..
- *
- * If the resourceActionGenerator returns null for a resource, that resource will not be added to the filtered
- * resources.
- *
- * This function will set the DRUID_AUTHORIZATION_CHECKED attribute in the request.
- *
- * If this attribute is already set when this function is called, an exception is thrown.
+ * Return an iterable of authorized resources, by filtering the input resources with authorization checks based on the
+ * authentication fields from the request. This method does:
+ *
+ * For every resource, resourceActionGenerator generates an Iterable of ResourceAction or null.
+ *
+ * If null, continue with next resource. If any resource-action in the iterable has deny-access, continue with next
+ * resource. Only when every resource-action has allow-access, add the resource to the result.
+ *
+ *
+ * This function will set the DRUID_AUTHORIZATION_CHECKED attribute in the request. If this attribute is already set
+ * when this function is called, an exception is thrown.
*
* @param request HTTP request to be authorized
* @param resources resources to be processed into resource-actions
* @param resourceActionGenerator Function that creates an iterable of resource-actions from a resource
* @param authorizerMapper authorizer mapper
- *
* @return Iterable containing resources that were authorized
*/
public static Iterable filterAuthorizedResources(
@@ -305,24 +305,18 @@ public static Iterable filterAuthorizedResources(
}
/**
- * Filter a collection of resources by applying the resourceActionGenerator to each resource, return an iterable
- * containing the filtered resources.
- *
- * The resourceActionGenerator returns an Iterable for each resource.
- *
- * If every resource-action in the iterable is authorized, the resource will be added to the filtered resources.
- *
- * If there is an authorization failure for one of the resource-actions, the resource will not be
- * added to the returned filtered resources..
- *
- * If the resourceActionGenerator returns null for a resource, that resource will not be added to the filtered
- * resources.
+ * Return an iterable of authorized resources, by filtering the input resources with authorization checks based on
+ * authenticationResult. This method does:
+ *
+ * For every resource, resourceActionGenerator generates an Iterable of ResourceAction or null.
+ *
+ * If null, continue with next resource. If any resource-action in the iterable has deny-access, continue with next
+ * resource. Only when every resource-action has allow-access, add the resource to the result.
*
* @param authenticationResult Authentication result representing identity of requester
* @param resources resources to be processed into resource-actions
* @param resourceActionGenerator Function that creates an iterable of resource-actions from a resource
* @param authorizerMapper authorizer mapper
- *
* @return Iterable containing resources that were authorized
*/
public static Iterable filterAuthorizedResources(
@@ -369,23 +363,22 @@ public static Iterable filterAuthorizedResources(
}
/**
- * Given a map of resource lists, filter each resources list by applying the resource action generator to each
- * item in each resource list.
- *
- * The resourceActionGenerator returns an Iterable for each resource.
- *
- * If a resource list is null or has no authorized items after filtering, it will not be included in the returned
- * map.
- *
- * This function will set the DRUID_AUTHORIZATION_CHECKED attribute in the request.
- *
- * If this attribute is already set when this function is called, an exception is thrown.
+ * Return a map of authorized resources, by filtering the input resources with authorization checks based on the
+ * authentication fields from the request. This method does:
+ *
+ * For every resource, resourceActionGenerator generates an Iterable of ResourceAction or null.
+ *
+ * If null, continue with next resource. If any resource-action in the iterable has deny-access, continue with next
+ * resource. Only when every resource-action has allow-access, add the resource to the result.
+ *
+ *
+ * This function will set the DRUID_AUTHORIZATION_CHECKED attribute in the request. If this attribute is already set
+ * when this function is called, an exception is thrown.
*
* @param request HTTP request to be authorized
* @param unfilteredResources Map of resource lists to be filtered
* @param resourceActionGenerator Function that creates an iterable of resource-actions from a resource
* @param authorizerMapper authorizer mapper
- *
* @return Map containing lists of resources that were authorized
*/
public static Map> filterAuthorizedResources(
@@ -437,7 +430,7 @@ public static Map> filterAuthorizedRes
* This method constructs a 'superuser' set of permissions composed of {@link Action#READ} and {@link Action#WRITE}
* permissions for all known {@link ResourceType#knownTypes()} for any {@link Authorizer} implementation which is
* built on pattern matching with a regex.
- *
+ *
* Note that if any {@link Resource} exist that use custom types not registered with
* {@link ResourceType#registerResourceType}, those permissions will not be included in this list and will need to
* be added manually.
diff --git a/server/src/test/java/org/apache/druid/initialization/AuthorizationResultTest.java b/server/src/test/java/org/apache/druid/initialization/AuthorizationResultTest.java
new file mode 100644
index 000000000000..c6d62ce5713a
--- /dev/null
+++ b/server/src/test/java/org/apache/druid/initialization/AuthorizationResultTest.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.initialization;
+
+import com.google.common.collect.ImmutableMap;
+import nl.jqno.equalsverifier.EqualsVerifier;
+import org.apache.druid.error.DruidException;
+import org.apache.druid.query.filter.EqualityFilter;
+import org.apache.druid.query.policy.NoRestrictionPolicy;
+import org.apache.druid.query.policy.RowFilterPolicy;
+import org.apache.druid.segment.column.ColumnType;
+import org.apache.druid.server.security.AuthorizationResult;
+import org.junit.Test;
+
+import java.util.Optional;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+public class AuthorizationResultTest
+{
+ @Test
+ public void testEquals()
+ {
+ EqualsVerifier.forClass(AuthorizationResult.class)
+ .usingGetClass()
+ .verify();
+ }
+
+ @Test
+ public void testToString()
+ {
+ AuthorizationResult result = AuthorizationResult.allowWithRestriction(
+ ImmutableMap.of(
+ "table1",
+ Optional.of(NoRestrictionPolicy.instance()),
+ "table2",
+ Optional.of(
+ RowFilterPolicy.from(new EqualityFilter("column1", ColumnType.STRING, "val1", null)))
+ )
+ );
+ assertEquals(
+ "AuthorizationResult [permission=ALLOW_WITH_RESTRICTION, failureMessage=null, policyRestrictions={table1=Optional[NO_RESTRICTION], table2=Optional[RowFilterPolicy{rowFilter=column1 = val1}]}]",
+ result.toString()
+ );
+ }
+
+ @Test
+ public void testNoAccess()
+ {
+ AuthorizationResult result = AuthorizationResult.deny("this data source is not permitted");
+ assertFalse(result.allowBasicAccess());
+ assertFalse(result.allowAccessWithNoRestriction());
+ assertEquals("this data source is not permitted", result.getErrorMessage());
+ assertFalse(result.allowAccessWithNoRestriction());
+ }
+
+ @Test
+ public void testFullAccess()
+ {
+ AuthorizationResult result = AuthorizationResult.allowWithRestriction(ImmutableMap.of());
+ assertTrue(result.allowBasicAccess());
+ assertTrue(result.allowAccessWithNoRestriction());
+ assertThrows(DruidException.class, result::getErrorMessage);
+
+ AuthorizationResult resultWithEmptyPolicy = AuthorizationResult.allowWithRestriction(ImmutableMap.of(
+ "table1",
+ Optional.empty()
+ ));
+ assertTrue(resultWithEmptyPolicy.allowBasicAccess());
+ assertTrue(resultWithEmptyPolicy.allowAccessWithNoRestriction());
+ assertThrows(DruidException.class, resultWithEmptyPolicy::getErrorMessage);
+
+ AuthorizationResult resultWithNoRestrictionPolicy = AuthorizationResult.allowWithRestriction(ImmutableMap.of(
+ "table1",
+ Optional.of(NoRestrictionPolicy.instance())
+ ));
+ assertTrue(resultWithNoRestrictionPolicy.allowBasicAccess());
+ assertTrue(resultWithNoRestrictionPolicy.allowAccessWithNoRestriction());
+ assertThrows(DruidException.class, resultWithNoRestrictionPolicy::getErrorMessage);
+ }
+
+ @Test
+ public void testRestrictedAccess()
+ {
+ AuthorizationResult result = AuthorizationResult.allowWithRestriction(ImmutableMap.of(
+ "table1",
+ Optional.of(RowFilterPolicy.from(new EqualityFilter(
+ "col",
+ ColumnType.STRING,
+ "val1",
+ null
+ )))
+ ));
+ assertTrue(result.allowBasicAccess());
+ assertFalse(result.allowAccessWithNoRestriction());
+ assertEquals("Unauthorized", result.getErrorMessage());
+ }
+}
diff --git a/server/src/test/java/org/apache/druid/segment/metadata/CoordinatorSegmentMetadataCacheTest.java b/server/src/test/java/org/apache/druid/segment/metadata/CoordinatorSegmentMetadataCacheTest.java
index 22b0890e855e..d56db2ea4418 100644
--- a/server/src/test/java/org/apache/druid/segment/metadata/CoordinatorSegmentMetadataCacheTest.java
+++ b/server/src/test/java/org/apache/druid/segment/metadata/CoordinatorSegmentMetadataCacheTest.java
@@ -78,8 +78,8 @@
import org.apache.druid.server.coordinator.loading.SegmentReplicaCount;
import org.apache.druid.server.coordinator.loading.SegmentReplicationStatus;
import org.apache.druid.server.metrics.NoopServiceEmitter;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.AllowAllAuthenticator;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.NoopEscalator;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.SegmentId;
@@ -1064,7 +1064,11 @@ public void testRunSegmentMetadataQueryWithContext() throws Exception
EasyMock.expect(factoryMock.factorize()).andReturn(lifecycleMock).once();
// This is the mat of the test, making sure that the query created by the method under test matches the expected query, specifically the operator configured context
- EasyMock.expect(lifecycleMock.runSimple(expectedMetadataQuery, AllowAllAuthenticator.ALLOW_ALL_RESULT, Access.OK))
+ EasyMock.expect(lifecycleMock.runSimple(
+ expectedMetadataQuery,
+ AllowAllAuthenticator.ALLOW_ALL_RESULT,
+ AuthorizationResult.ALLOW_NO_RESTRICTION
+ ))
.andReturn(QueryResponse.withEmptyContext(Sequences.empty()));
EasyMock.replay(factoryMock, lifecycleMock);
@@ -2299,7 +2303,11 @@ public void testTombstoneSegmentIsNotRefreshed() throws IOException
);
EasyMock.expect(factoryMock.factorize()).andReturn(lifecycleMock).once();
- EasyMock.expect(lifecycleMock.runSimple(expectedMetadataQuery, AllowAllAuthenticator.ALLOW_ALL_RESULT, Access.OK))
+ EasyMock.expect(lifecycleMock.runSimple(
+ expectedMetadataQuery,
+ AllowAllAuthenticator.ALLOW_ALL_RESULT,
+ AuthorizationResult.ALLOW_NO_RESTRICTION
+ ))
.andReturn(QueryResponse.withEmptyContext(Sequences.empty())).once();
EasyMock.replay(factoryMock, lifecycleMock);
diff --git a/server/src/test/java/org/apache/druid/server/QueryLifecycleTest.java b/server/src/test/java/org/apache/druid/server/QueryLifecycleTest.java
index 8bc436ed405c..d63a162d56de 100644
--- a/server/src/test/java/org/apache/druid/server/QueryLifecycleTest.java
+++ b/server/src/test/java/org/apache/druid/server/QueryLifecycleTest.java
@@ -22,31 +22,44 @@
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
+import org.apache.druid.error.DruidException;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.java.util.emitter.service.ServiceEmitter;
+import org.apache.druid.query.DataSource;
import org.apache.druid.query.DefaultQueryConfig;
import org.apache.druid.query.Druids;
import org.apache.druid.query.GenericQueryMetricsFactory;
+import org.apache.druid.query.Query;
import org.apache.druid.query.QueryContextTest;
import org.apache.druid.query.QueryMetrics;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactoryConglomerate;
import org.apache.druid.query.QuerySegmentWalker;
import org.apache.druid.query.QueryToolChest;
+import org.apache.druid.query.RestrictedDataSource;
+import org.apache.druid.query.TableDataSource;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
+import org.apache.druid.query.filter.DimFilter;
+import org.apache.druid.query.filter.NullFilter;
+import org.apache.druid.query.metadata.metadata.SegmentMetadataQuery;
+import org.apache.druid.query.policy.NoRestrictionPolicy;
+import org.apache.druid.query.policy.Policy;
+import org.apache.druid.query.policy.RowFilterPolicy;
import org.apache.druid.query.timeseries.TimeseriesQuery;
import org.apache.druid.server.log.RequestLogger;
import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
import org.apache.druid.server.security.AuthConfig;
import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.Authorizer;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.Resource;
import org.apache.druid.server.security.ResourceType;
import org.easymock.EasyMock;
+import org.easymock.IArgumentMatcher;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -55,9 +68,9 @@
import org.junit.rules.ExpectedException;
import javax.servlet.http.HttpServletRequest;
-
import java.util.HashMap;
import java.util.Map;
+import java.util.Optional;
public class QueryLifecycleTest
{
@@ -65,6 +78,8 @@ public class QueryLifecycleTest
private static final String IDENTITY = "some_identity";
private static final String AUTHORIZER = "some_authorizer";
+ private static final Resource RESOURCE = new Resource(DATASOURCE, ResourceType.DATASOURCE);
+
private final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
.dataSource(DATASOURCE)
.intervals(ImmutableList.of(Intervals.ETERNITY))
@@ -142,7 +157,7 @@ public void teardown()
}
@Test
- public void testRunSimplePreauthorized()
+ public void testRunSimple_preauthorizedAsSuperuser()
{
EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
@@ -157,28 +172,297 @@ public void testRunSimplePreauthorized()
replayAll();
QueryLifecycle lifecycle = createLifecycle(new AuthConfig());
- lifecycle.runSimple(query, authenticationResult, Access.OK);
+ lifecycle.runSimple(query, authenticationResult, AuthorizationResult.ALLOW_NO_RESTRICTION);
}
@Test
public void testRunSimpleUnauthorized()
{
- expectedException.expect(ISE.class);
- expectedException.expectMessage(Access.DEFAULT_ERROR_MESSAGE);
+ expectedException.expect(DruidException.class);
+ expectedException.expectMessage("Unexpected state [UNAUTHORIZED], expecting [AUTHORIZED]");
+
+ EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
+ EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
+ EasyMock.expect(conglomerate.getToolChest(EasyMock.anyObject()))
+ .andReturn(toolChest)
+ .once();
+ EasyMock.expect(toolChest.makeMetrics(EasyMock.anyObject())).andReturn(metrics).anyTimes();
+ replayAll();
+
+ QueryLifecycle lifecycle = createLifecycle(new AuthConfig());
+ lifecycle.runSimple(query, authenticationResult, AuthorizationResult.DENY);
+ }
+
+ @Test
+ public void testRunSimple_withPolicyRestriction()
+ {
+ // Test the path when an external client send a sql query to broker, through runSimple.
+ Policy rowFilterPolicy = RowFilterPolicy.from(new NullFilter("some-column", null));
+ AuthorizationResult authorizationResult = AuthorizationResult.allowWithRestriction(ImmutableMap.of(
+ DATASOURCE,
+ Optional.of(rowFilterPolicy)
+ ));
+ DataSource expectedDataSource = RestrictedDataSource.create(TableDataSource.create(DATASOURCE), rowFilterPolicy);
+
+ final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
+ .dataSource(DATASOURCE)
+ .intervals(ImmutableList.of(Intervals.ETERNITY))
+ .aggregators(new CountAggregatorFactory("chocula"))
+ .build();
+ EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
+ EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
+ EasyMock.expect(authenticationResult.getAuthorizerName()).andReturn(AUTHORIZER).anyTimes();
+ EasyMock.expect(conglomerate.getToolChest(EasyMock.anyObject()))
+ .andReturn(toolChest).once();
+ // We're expecting the data source in the query to be transformed to a RestrictedDataSource, with policy.
+ // Any other DataSource would throw AssertionError.
+ EasyMock.expect(texasRanger.getQueryRunnerForIntervals(
+ queryMatchDataSource(expectedDataSource),
+ EasyMock.anyObject()
+ )).andReturn(runner).once();
+ EasyMock.expect(runner.run(EasyMock.anyObject(), EasyMock.anyObject())).andReturn(Sequences.empty()).once();
+ replayAll();
+
+ AuthConfig authConfig = AuthConfig.newBuilder()
+ .setAuthorizeQueryContextParams(true)
+ .build();
+ QueryLifecycle lifecycle = createLifecycle(authConfig);
+ lifecycle.runSimple(query, authenticationResult, authorizationResult);
+ }
+
+ @Test
+ public void testRunSimple_withPolicyRestriction_segmentMetadataQueryRunAsInternal()
+ {
+ // Test the path when broker sends SegmentMetadataQuery to historical, through runSimple.
+ // The druid-internal gets a NoRestrictionPolicy.
+ AuthorizationResult authorizationResult = AuthorizationResult.allowWithRestriction(ImmutableMap.of(
+ DATASOURCE,
+ Optional.of(NoRestrictionPolicy.instance())
+ ));
+ final SegmentMetadataQuery query = Druids.newSegmentMetadataQueryBuilder()
+ .dataSource(DATASOURCE)
+ .intervals(ImmutableList.of(Intervals.ETERNITY))
+ .build();
+ EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
+ EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
+ EasyMock.expect(authenticationResult.getAuthorizerName()).andReturn(AUTHORIZER).anyTimes();
+ EasyMock.expect(conglomerate.getToolChest(EasyMock.anyObject()))
+ .andReturn(toolChest).once();
+ // We're expecting the data source in the query to still be TableDataSource.
+ // Any other DataSource would throw AssertionError.
+ EasyMock.expect(texasRanger.getQueryRunnerForIntervals(
+ queryMatchDataSource(TableDataSource.create(DATASOURCE)),
+ EasyMock.anyObject()
+ )).andReturn(runner).once();
+ EasyMock.expect(runner.run(EasyMock.anyObject(), EasyMock.anyObject())).andReturn(Sequences.empty()).once();
+ replayAll();
+
+ QueryLifecycle lifecycle = createLifecycle(new AuthConfig());
+ lifecycle.runSimple(query, authenticationResult, authorizationResult);
+ }
+
+
+ @Test
+ public void testRunSimple_withPolicyRestriction_segmentMetadataQueryRunAsExternal()
+ {
+ Policy policy = RowFilterPolicy.from(new NullFilter("col", null));
+ AuthorizationResult authorizationResult = AuthorizationResult.allowWithRestriction(ImmutableMap.of(
+ DATASOURCE,
+ Optional.of(policy)
+ ));
+ final SegmentMetadataQuery query = Druids.newSegmentMetadataQueryBuilder()
+ .dataSource(DATASOURCE)
+ .intervals(ImmutableList.of(Intervals.ETERNITY))
+ .build();
+ EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
+ EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
+ EasyMock.expect(authenticationResult.getAuthorizerName()).andReturn(AUTHORIZER).anyTimes();
+ EasyMock.expect(conglomerate.getToolChest(EasyMock.anyObject()))
+ .andReturn(toolChest).once();
+ EasyMock.expect(toolChest.makeMetrics(EasyMock.anyObject())).andReturn(metrics).once();
+ replayAll();
+
+ QueryLifecycle lifecycle = createLifecycle(new AuthConfig());
+ Assert.assertThrows(Exception.class, () -> lifecycle.runSimple(query, authenticationResult, authorizationResult));
+ }
+ @Test
+ public void testRunSimple_withoutPolicy()
+ {
+ AuthorizationResult authorizationResult = AuthorizationResult.ALLOW_NO_RESTRICTION;
EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
EasyMock.expect(conglomerate.getToolChest(EasyMock.anyObject()))
.andReturn(toolChest)
.once();
+ EasyMock.expect(texasRanger.getQueryRunnerForIntervals(EasyMock.anyObject(), EasyMock.anyObject()))
+ .andReturn(runner)
+ .anyTimes();
+ EasyMock.expect(runner.run(EasyMock.anyObject(), EasyMock.anyObject())).andReturn(Sequences.empty()).anyTimes();
+ EasyMock.expect(toolChest.makeMetrics(EasyMock.anyObject())).andReturn(metrics).anyTimes();
+ replayAll();
+
+ QueryLifecycle lifecycle = createLifecycle(AuthConfig.newBuilder().build());
+ lifecycle.runSimple(query, authenticationResult, authorizationResult);
+ }
+
+ @Test
+ public void testRunSimple_foundMultiplePolicyRestrictions()
+ {
+ // Multiple policy restrictions indicates most likely the system is trying to double-authorizing the request
+ // This is not allowed in any case.
+ expectedException.expect(ISE.class);
+ expectedException.expectMessage(
+ "Multiple restrictions on table [some_datasource]: policy [RowFilterPolicy{rowFilter=some-column IS NULL}] and policy [RowFilterPolicy{rowFilter=some-column2 IS NULL}]");
+
+ DimFilter originalFilterOnRDS = new NullFilter("some-column", null);
+ Policy originalFilterPolicy = RowFilterPolicy.from(originalFilterOnRDS);
+
+ Policy newFilterPolicy = RowFilterPolicy.from(new NullFilter("some-column2", null));
+ AuthorizationResult authorizationResult = AuthorizationResult.allowWithRestriction(ImmutableMap.of(
+ DATASOURCE,
+ Optional.of(newFilterPolicy)
+ ));
+
+ final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
+ .dataSource(RestrictedDataSource.create(
+ TableDataSource.create(DATASOURCE),
+ originalFilterPolicy
+ ))
+ .intervals(ImmutableList.of(Intervals.ETERNITY))
+ .aggregators(new CountAggregatorFactory("chocula"))
+ .build();
+ EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
+ EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
+ EasyMock.expect(authenticationResult.getAuthorizerName()).andReturn(AUTHORIZER).anyTimes();
+ EasyMock.expect(conglomerate.getToolChest(EasyMock.anyObject())).andReturn(toolChest).anyTimes();
+ EasyMock.expect(toolChest.makeMetrics(EasyMock.anyObject())).andReturn(metrics).anyTimes();
+ EasyMock.expect(texasRanger.getQueryRunnerForIntervals(EasyMock.anyObject(), EasyMock.anyObject()))
+ .andReturn(runner).anyTimes();
+ EasyMock.expect(runner.run(EasyMock.anyObject(), EasyMock.anyObject())).andReturn(Sequences.empty()).anyTimes();
+ replayAll();
+ QueryLifecycle lifecycle = createLifecycle(new AuthConfig());
+ lifecycle.runSimple(query, authenticationResult, authorizationResult);
+ }
+
+ @Test
+ public void testRunSimple_queryWithRestrictedDataSource_policyRestrictionMightHaveBeenRemoved()
+ {
+ expectedException.expect(ISE.class);
+ expectedException.expectMessage(
+ "No restriction found on table [some_datasource], but had policy [RowFilterPolicy{rowFilter=some-column IS NULL}] before.");
+
+ DimFilter originalFilterOnRDS = new NullFilter("some-column", null);
+ Policy originalFilterPolicy = RowFilterPolicy.from(originalFilterOnRDS);
+ DataSource restrictedDataSource = RestrictedDataSource.create(
+ TableDataSource.create(DATASOURCE),
+ originalFilterPolicy
+ );
+
+ // The query is built on a restricted data source, but we didn't find any policy, which could be one of:
+ // 1. policy restriction might have been removed
+ // 2. some bug in the system
+ // In this case, we throw an exception to be safe.
+ AuthorizationResult authorizationResult = AuthorizationResult.allowWithRestriction(ImmutableMap.of(
+ DATASOURCE,
+ Optional.empty()
+ ));
+
+ final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
+ .dataSource(restrictedDataSource)
+ .intervals(ImmutableList.of(Intervals.ETERNITY))
+ .aggregators(new CountAggregatorFactory("chocula"))
+ .build();
+ EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
+ EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
+ EasyMock.expect(authenticationResult.getAuthorizerName()).andReturn(AUTHORIZER).anyTimes();
+ EasyMock.expect(conglomerate.getToolChest(EasyMock.anyObject())).andReturn(toolChest).anyTimes();
EasyMock.expect(toolChest.makeMetrics(EasyMock.anyObject())).andReturn(metrics).anyTimes();
+ EasyMock.expect(texasRanger.getQueryRunnerForIntervals(EasyMock.anyObject(), EasyMock.anyObject()))
+ .andReturn(runner).anyTimes();
+ EasyMock.expect(runner.run(EasyMock.anyObject(), EasyMock.anyObject())).andReturn(Sequences.empty()).anyTimes();
+ replayAll();
+ QueryLifecycle lifecycle = createLifecycle(new AuthConfig());
+ lifecycle.runSimple(query, authenticationResult, authorizationResult);
+ }
+ @Test
+ public void testAuthorized_withPolicyRestriction()
+ {
+ // Test the path broker receives a native json query from external client, should add restriction on data source
+ Policy rowFilterPolicy = RowFilterPolicy.from(new NullFilter("some-column", null));
+ Access access = Access.allowWithRestriction(rowFilterPolicy);
+
+ DataSource expectedDataSource = RestrictedDataSource.create(TableDataSource.create(DATASOURCE), rowFilterPolicy);
+
+ final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
+ .dataSource(DATASOURCE)
+ .intervals(ImmutableList.of(Intervals.ETERNITY))
+ .aggregators(new CountAggregatorFactory("chocula"))
+ .build();
+ EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
+ EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
+ EasyMock.expect(authenticationResult.getAuthorizerName()).andReturn(AUTHORIZER).anyTimes();
+ EasyMock.expect(authorizer.authorize(authenticationResult, RESOURCE, Action.READ))
+ .andReturn(access).anyTimes();
+ EasyMock.expect(conglomerate.getToolChest(EasyMock.anyObject()))
+ .andReturn(toolChest).anyTimes();
+ // We're expecting the data source in the query to be transformed to a RestrictedDataSource, with policy.
+ // Any other DataSource would throw AssertionError.
+ EasyMock.expect(texasRanger.getQueryRunnerForIntervals(
+ queryMatchDataSource(expectedDataSource),
+ EasyMock.anyObject()
+ ))
+ .andReturn(runner).anyTimes();
+ EasyMock.expect(runner.run(EasyMock.anyObject(), EasyMock.anyObject())).andReturn(Sequences.empty()).anyTimes();
replayAll();
QueryLifecycle lifecycle = createLifecycle(new AuthConfig());
- lifecycle.runSimple(query, authenticationResult, new Access(false));
+ lifecycle.initialize(query);
+ Assert.assertTrue(lifecycle.authorize(authenticationResult).allowBasicAccess());
+ lifecycle.execute();
+ }
+
+ @Test
+ public void testAuthorized_queryWithRestrictedDataSource_runWithSuperUserPermission()
+ {
+ // Test the path historical receives a native json query from broker, query already has restriction on data source
+ Policy rowFilterPolicy = RowFilterPolicy.from(new NullFilter("some-column", null));
+ // Internal druid system would get a NO_RESTRICTION on a restricted data source.
+ Access access = Access.allowWithRestriction(NoRestrictionPolicy.instance());
+
+ DataSource restrictedDataSource = RestrictedDataSource.create(TableDataSource.create(DATASOURCE), rowFilterPolicy);
+
+ final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
+ .dataSource(restrictedDataSource)
+ .intervals(ImmutableList.of(Intervals.ETERNITY))
+ .aggregators(new CountAggregatorFactory("chocula"))
+ .build();
+ EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
+ EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
+ EasyMock.expect(authenticationResult.getAuthorizerName()).andReturn(AUTHORIZER).anyTimes();
+ EasyMock.expect(authorizer.authorize(authenticationResult, RESOURCE, Action.READ))
+ .andReturn(access).anyTimes();
+ EasyMock.expect(conglomerate.getToolChest(EasyMock.anyObject()))
+ .andReturn(toolChest).anyTimes();
+ // We're expecting the data source in the query to be the same RestrictedDataSource.
+ EasyMock.expect(texasRanger.getQueryRunnerForIntervals(
+ queryMatchDataSource(restrictedDataSource),
+ EasyMock.anyObject()
+ ))
+ .andReturn(runner).anyTimes();
+ EasyMock.expect(runner.run(EasyMock.anyObject(), EasyMock.anyObject())).andReturn(Sequences.empty()).anyTimes();
+ replayAll();
+
+ AuthConfig authConfig = AuthConfig.newBuilder()
+ .setAuthorizeQueryContextParams(true)
+ .build();
+ QueryLifecycle lifecycle = createLifecycle(authConfig);
+ lifecycle.initialize(query);
+ Assert.assertTrue(lifecycle.authorize(authenticationResult).allowBasicAccess());
+ lifecycle.execute();
}
@Test
@@ -187,11 +471,23 @@ public void testAuthorizeQueryContext_authorized()
EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
EasyMock.expect(authenticationResult.getAuthorizerName()).andReturn(AUTHORIZER).anyTimes();
- EasyMock.expect(authorizer.authorize(authenticationResult, new Resource(DATASOURCE, ResourceType.DATASOURCE), Action.READ))
+ EasyMock.expect(authorizer.authorize(
+ authenticationResult,
+ new Resource(DATASOURCE, ResourceType.DATASOURCE),
+ Action.READ
+ ))
.andReturn(Access.OK).times(2);
- EasyMock.expect(authorizer.authorize(authenticationResult, new Resource("foo", ResourceType.QUERY_CONTEXT), Action.WRITE))
+ EasyMock.expect(authorizer.authorize(
+ authenticationResult,
+ new Resource("foo", ResourceType.QUERY_CONTEXT),
+ Action.WRITE
+ ))
.andReturn(Access.OK).times(2);
- EasyMock.expect(authorizer.authorize(authenticationResult, new Resource("baz", ResourceType.QUERY_CONTEXT), Action.WRITE))
+ EasyMock.expect(authorizer.authorize(
+ authenticationResult,
+ new Resource("baz", ResourceType.QUERY_CONTEXT),
+ Action.WRITE
+ ))
.andReturn(Access.OK).times(2);
EasyMock.expect(conglomerate.getToolChest(EasyMock.anyObject()))
@@ -209,8 +505,8 @@ public void testAuthorizeQueryContext_authorized()
.build();
AuthConfig authConfig = AuthConfig.newBuilder()
- .setAuthorizeQueryContextParams(true)
- .build();
+ .setAuthorizeQueryContextParams(true)
+ .build();
QueryLifecycle lifecycle = createLifecycle(authConfig);
lifecycle.initialize(query);
@@ -222,11 +518,11 @@ public void testAuthorizeQueryContext_authorized()
revisedContext
);
- Assert.assertTrue(lifecycle.authorize(mockRequest()).isAllowed());
+ Assert.assertTrue(lifecycle.authorize(mockRequest()).allowAccessWithNoRestriction());
lifecycle = createLifecycle(authConfig);
lifecycle.initialize(query);
- Assert.assertTrue(lifecycle.authorize(authenticationResult).isAllowed());
+ Assert.assertTrue(lifecycle.authorize(authenticationResult).allowAccessWithNoRestriction());
}
@Test
@@ -235,10 +531,18 @@ public void testAuthorizeQueryContext_notAuthorized()
EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
EasyMock.expect(authenticationResult.getAuthorizerName()).andReturn(AUTHORIZER).anyTimes();
- EasyMock.expect(authorizer.authorize(authenticationResult, new Resource(DATASOURCE, ResourceType.DATASOURCE), Action.READ))
+ EasyMock.expect(authorizer.authorize(
+ authenticationResult,
+ new Resource(DATASOURCE, ResourceType.DATASOURCE),
+ Action.READ
+ ))
.andReturn(Access.OK)
.times(2);
- EasyMock.expect(authorizer.authorize(authenticationResult, new Resource("foo", ResourceType.QUERY_CONTEXT), Action.WRITE))
+ EasyMock.expect(authorizer.authorize(
+ authenticationResult,
+ new Resource("foo", ResourceType.QUERY_CONTEXT),
+ Action.WRITE
+ ))
.andReturn(Access.DENIED)
.times(2);
@@ -256,15 +560,15 @@ public void testAuthorizeQueryContext_notAuthorized()
.build();
AuthConfig authConfig = AuthConfig.newBuilder()
- .setAuthorizeQueryContextParams(true)
- .build();
+ .setAuthorizeQueryContextParams(true)
+ .build();
QueryLifecycle lifecycle = createLifecycle(authConfig);
lifecycle.initialize(query);
- Assert.assertFalse(lifecycle.authorize(mockRequest()).isAllowed());
+ Assert.assertFalse(lifecycle.authorize(mockRequest()).allowBasicAccess());
lifecycle = createLifecycle(authConfig);
lifecycle.initialize(query);
- Assert.assertFalse(lifecycle.authorize(authenticationResult).isAllowed());
+ Assert.assertFalse(lifecycle.authorize(authenticationResult).allowBasicAccess());
}
@Test
@@ -273,7 +577,7 @@ public void testAuthorizeQueryContext_unsecuredKeys()
EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
EasyMock.expect(authenticationResult.getAuthorizerName()).andReturn(AUTHORIZER).anyTimes();
- EasyMock.expect(authorizer.authorize(authenticationResult, new Resource(DATASOURCE, ResourceType.DATASOURCE), Action.READ))
+ EasyMock.expect(authorizer.authorize(authenticationResult, RESOURCE, Action.READ))
.andReturn(Access.OK)
.times(2);
@@ -292,9 +596,9 @@ public void testAuthorizeQueryContext_unsecuredKeys()
.build();
AuthConfig authConfig = AuthConfig.newBuilder()
- .setAuthorizeQueryContextParams(true)
- .setUnsecuredContextKeys(ImmutableSet.of("foo", "baz"))
- .build();
+ .setAuthorizeQueryContextParams(true)
+ .setUnsecuredContextKeys(ImmutableSet.of("foo", "baz"))
+ .build();
QueryLifecycle lifecycle = createLifecycle(authConfig);
lifecycle.initialize(query);
@@ -306,11 +610,11 @@ public void testAuthorizeQueryContext_unsecuredKeys()
revisedContext
);
- Assert.assertTrue(lifecycle.authorize(mockRequest()).isAllowed());
+ Assert.assertTrue(lifecycle.authorize(mockRequest()).allowAccessWithNoRestriction());
lifecycle = createLifecycle(authConfig);
lifecycle.initialize(query);
- Assert.assertTrue(lifecycle.authorize(authenticationResult).isAllowed());
+ Assert.assertTrue(lifecycle.authorize(authenticationResult).allowAccessWithNoRestriction());
}
@Test
@@ -319,7 +623,11 @@ public void testAuthorizeQueryContext_securedKeys()
EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
EasyMock.expect(authenticationResult.getAuthorizerName()).andReturn(AUTHORIZER).anyTimes();
- EasyMock.expect(authorizer.authorize(authenticationResult, new Resource(DATASOURCE, ResourceType.DATASOURCE), Action.READ))
+ EasyMock.expect(authorizer.authorize(
+ authenticationResult,
+ new Resource(DATASOURCE, ResourceType.DATASOURCE),
+ Action.READ
+ ))
.andReturn(Access.OK)
.times(2);
@@ -338,10 +646,10 @@ public void testAuthorizeQueryContext_securedKeys()
.build();
AuthConfig authConfig = AuthConfig.newBuilder()
- .setAuthorizeQueryContextParams(true)
- // We have secured keys, just not what the user gave.
- .setSecuredContextKeys(ImmutableSet.of("foo2", "baz2"))
- .build();
+ .setAuthorizeQueryContextParams(true)
+ // We have secured keys, just not what the user gave.
+ .setSecuredContextKeys(ImmutableSet.of("foo2", "baz2"))
+ .build();
QueryLifecycle lifecycle = createLifecycle(authConfig);
lifecycle.initialize(query);
@@ -353,11 +661,11 @@ public void testAuthorizeQueryContext_securedKeys()
revisedContext
);
- Assert.assertTrue(lifecycle.authorize(mockRequest()).isAllowed());
+ Assert.assertTrue(lifecycle.authorize(mockRequest()).allowBasicAccess());
lifecycle = createLifecycle(authConfig);
lifecycle.initialize(query);
- Assert.assertTrue(lifecycle.authorize(authenticationResult).isAllowed());
+ Assert.assertTrue(lifecycle.authorize(authenticationResult).allowBasicAccess());
}
@Test
@@ -366,10 +674,18 @@ public void testAuthorizeQueryContext_securedKeysNotAuthorized()
EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
EasyMock.expect(authenticationResult.getAuthorizerName()).andReturn(AUTHORIZER).anyTimes();
- EasyMock.expect(authorizer.authorize(authenticationResult, new Resource(DATASOURCE, ResourceType.DATASOURCE), Action.READ))
+ EasyMock.expect(authorizer.authorize(
+ authenticationResult,
+ new Resource(DATASOURCE, ResourceType.DATASOURCE),
+ Action.READ
+ ))
.andReturn(Access.OK)
.times(2);
- EasyMock.expect(authorizer.authorize(authenticationResult, new Resource("foo", ResourceType.QUERY_CONTEXT), Action.WRITE))
+ EasyMock.expect(authorizer.authorize(
+ authenticationResult,
+ new Resource("foo", ResourceType.QUERY_CONTEXT),
+ Action.WRITE
+ ))
.andReturn(Access.DENIED)
.times(2);
@@ -388,17 +704,17 @@ public void testAuthorizeQueryContext_securedKeysNotAuthorized()
.build();
AuthConfig authConfig = AuthConfig.newBuilder()
- .setAuthorizeQueryContextParams(true)
- // We have secured keys. User used one of them.
- .setSecuredContextKeys(ImmutableSet.of("foo", "baz2"))
- .build();
+ .setAuthorizeQueryContextParams(true)
+ // We have secured keys. User used one of them.
+ .setSecuredContextKeys(ImmutableSet.of("foo", "baz2"))
+ .build();
QueryLifecycle lifecycle = createLifecycle(authConfig);
lifecycle.initialize(query);
- Assert.assertFalse(lifecycle.authorize(mockRequest()).isAllowed());
+ Assert.assertFalse(lifecycle.authorize(mockRequest()).allowBasicAccess());
lifecycle = createLifecycle(authConfig);
lifecycle.initialize(query);
- Assert.assertFalse(lifecycle.authorize(authenticationResult).isAllowed());
+ Assert.assertFalse(lifecycle.authorize(authenticationResult).allowBasicAccess());
}
@Test
@@ -407,13 +723,25 @@ public void testAuthorizeLegacyQueryContext_authorized()
EasyMock.expect(queryConfig.getContext()).andReturn(ImmutableMap.of()).anyTimes();
EasyMock.expect(authenticationResult.getIdentity()).andReturn(IDENTITY).anyTimes();
EasyMock.expect(authenticationResult.getAuthorizerName()).andReturn(AUTHORIZER).anyTimes();
- EasyMock.expect(authorizer.authorize(authenticationResult, new Resource("fake", ResourceType.DATASOURCE), Action.READ))
+ EasyMock.expect(authorizer.authorize(
+ authenticationResult,
+ new Resource("fake", ResourceType.DATASOURCE),
+ Action.READ
+ ))
.andReturn(Access.OK)
.times(2);
- EasyMock.expect(authorizer.authorize(authenticationResult, new Resource("foo", ResourceType.QUERY_CONTEXT), Action.WRITE))
+ EasyMock.expect(authorizer.authorize(
+ authenticationResult,
+ new Resource("foo", ResourceType.QUERY_CONTEXT),
+ Action.WRITE
+ ))
.andReturn(Access.OK)
.times(2);
- EasyMock.expect(authorizer.authorize(authenticationResult, new Resource("baz", ResourceType.QUERY_CONTEXT), Action.WRITE))
+ EasyMock.expect(authorizer.authorize(
+ authenticationResult,
+ new Resource("baz", ResourceType.QUERY_CONTEXT),
+ Action.WRITE
+ ))
.andReturn(Access.OK)
.times(2);
@@ -423,7 +751,12 @@ public void testAuthorizeLegacyQueryContext_authorized()
replayAll();
- final QueryContextTest.LegacyContextQuery query = new QueryContextTest.LegacyContextQuery(ImmutableMap.of("foo", "bar", "baz", "qux"));
+ final QueryContextTest.LegacyContextQuery query = new QueryContextTest.LegacyContextQuery(ImmutableMap.of(
+ "foo",
+ "bar",
+ "baz",
+ "qux"
+ ));
AuthConfig authConfig = AuthConfig.newBuilder()
.setAuthorizeQueryContextParams(true)
@@ -437,11 +770,31 @@ public void testAuthorizeLegacyQueryContext_authorized()
Assert.assertTrue(revisedContext.containsKey("baz"));
Assert.assertTrue(revisedContext.containsKey("queryId"));
- Assert.assertTrue(lifecycle.authorize(mockRequest()).isAllowed());
+ Assert.assertTrue(lifecycle.authorize(mockRequest()).allowBasicAccess());
lifecycle = createLifecycle(authConfig);
lifecycle.initialize(query);
- Assert.assertTrue(lifecycle.authorize(mockRequest()).isAllowed());
+ Assert.assertTrue(lifecycle.authorize(mockRequest()).allowBasicAccess());
+ }
+
+ public static Query<?> queryMatchDataSource(DataSource dataSource)
+ {
+ EasyMock.reportMatcher(new IArgumentMatcher()
+ {
+ @Override
+ public boolean matches(Object query)
+ {
+ return query instanceof Query
+ && ((Query<?>) query).getDataSource().equals(dataSource);
+ }
+
+ @Override
+ public void appendTo(StringBuffer buffer)
+ {
+ buffer.append("dataSource(\"").append(dataSource).append("\")");
+ }
+ });
+ return null;
}
private HttpServletRequest mockRequest()
diff --git a/server/src/test/java/org/apache/druid/server/QueryResourceTest.java b/server/src/test/java/org/apache/druid/server/QueryResourceTest.java
index b5580f90dfec..6f87127b0968 100644
--- a/server/src/test/java/org/apache/druid/server/QueryResourceTest.java
+++ b/server/src/test/java/org/apache/druid/server/QueryResourceTest.java
@@ -64,6 +64,8 @@
import org.apache.druid.query.Result;
import org.apache.druid.query.SegmentDescriptor;
import org.apache.druid.query.TruncatedResponseContextException;
+import org.apache.druid.query.filter.NullFilter;
+import org.apache.druid.query.policy.RowFilterPolicy;
import org.apache.druid.query.timeboundary.TimeBoundaryResultValue;
import org.apache.druid.server.initialization.ServerConfig;
import org.apache.druid.server.log.TestRequestLogger;
@@ -124,7 +126,8 @@
public class QueryResourceTest
{
- private static final DefaultQueryRunnerFactoryConglomerate CONGLOMERATE = DefaultQueryRunnerFactoryConglomerate.buildFromQueryRunnerFactories(ImmutableMap.of());
+ private static final DefaultQueryRunnerFactoryConglomerate CONGLOMERATE = DefaultQueryRunnerFactoryConglomerate.buildFromQueryRunnerFactories(
+ ImmutableMap.of());
private static final AuthenticationResult AUTHENTICATION_RESULT =
new AuthenticationResult("druid", "druid", null, null);
@@ -298,7 +301,9 @@ public void testGoodQueryWithQueryConfigOverrideDefault() throws IOException
final List<Result<TimeBoundaryResultValue>> responses = jsonMapper.readValue(
response.baos.toByteArray(),
- new TypeReference<>() {}
+ new TypeReference<>()
+ {
+ }
);
Assert.assertEquals(0, responses.size());
@@ -393,78 +398,85 @@ public QueryRunner getQueryRunnerForSegments(
public void testResponseWithIncludeTrailerHeader() throws IOException
{
queryResource = new QueryResource(
- new QueryLifecycleFactory(
- CONGLOMERATE,
- new QuerySegmentWalker()
- {
- @Override
- public <T> QueryRunner<T> getQueryRunnerForIntervals(
- Query<T> query,
- Iterable<Interval> intervals
- )
- {
- return (queryPlus, responseContext) -> new Sequence<T>() {
+ new QueryLifecycleFactory(
+ CONGLOMERATE,
+ new QuerySegmentWalker()
+ {
@Override
- public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator)
+ public <T> QueryRunner<T> getQueryRunnerForIntervals(
+ Query<T> query,
+ Iterable<Interval> intervals
+ )
{
- if (accumulator instanceof QueryResultPusher.StreamingHttpResponseAccumulator) {
- try {
- ((QueryResultPusher.StreamingHttpResponseAccumulator) accumulator).flush(); // initialized
- }
- catch (IOException ignore) {
+ return (queryPlus, responseContext) -> new Sequence<T>()
+ {
+ @Override
+ public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator)
+ {
+ if (accumulator instanceof QueryResultPusher.StreamingHttpResponseAccumulator) {
+ try {
+ ((QueryResultPusher.StreamingHttpResponseAccumulator) accumulator).flush(); // initialized
+ }
+ catch (IOException ignore) {
+ }
+ }
+
+ throw new QueryTimeoutException();
}
- }
- throw new QueryTimeoutException();
+ @Override
+ public <OutType> Yielder<OutType> toYielder(
+ OutType initValue,
+ YieldingAccumulator<OutType, T> accumulator
+ )
+ {
+ return Yielders.done(initValue, null);
+ }
+ };
}
@Override
- public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator)
+ public <T> QueryRunner<T> getQueryRunnerForSegments(
+ Query<T> query,
+ Iterable<SegmentDescriptor> specs
+ )
{
- return Yielders.done(initValue, null);
+ throw new UnsupportedOperationException();
}
- };
- }
-
- @Override
- public <T> QueryRunner<T> getQueryRunnerForSegments(
- Query<T> query,
- Iterable<SegmentDescriptor> specs
- )
- {
- throw new UnsupportedOperationException();
- }
- },
- new DefaultGenericQueryMetricsFactory(),
- new NoopServiceEmitter(),
- testRequestLogger,
+ },
+ new DefaultGenericQueryMetricsFactory(),
+ new NoopServiceEmitter(),
+ testRequestLogger,
+ new AuthConfig(),
+ AuthTestUtils.TEST_AUTHORIZER_MAPPER,
+ Suppliers.ofInstance(new DefaultQueryConfig(ImmutableMap.of()))
+ ),
+ jsonMapper,
+ smileMapper,
+ queryScheduler,
new AuthConfig(),
- AuthTestUtils.TEST_AUTHORIZER_MAPPER,
- Suppliers.ofInstance(new DefaultQueryConfig(ImmutableMap.of()))
- ),
- jsonMapper,
- smileMapper,
- queryScheduler,
- new AuthConfig(),
- null,
- ResponseContextConfig.newConfig(true),
- DRUID_NODE
+ null,
+ ResponseContextConfig.newConfig(true),
+ DRUID_NODE
);
expectPermissiveHappyPathAuth();
org.eclipse.jetty.server.Response response = this.jettyResponseforRequest(testServletRequest);
Assert.assertNull(queryResource.doPost(new ByteArrayInputStream(
- SIMPLE_TIMESERIES_QUERY.getBytes(StandardCharsets.UTF_8)),
- null /*pretty*/,
- testServletRequest));
+ SIMPLE_TIMESERIES_QUERY.getBytes(StandardCharsets.UTF_8)),
+ null /*pretty*/,
+ testServletRequest
+ ));
Assert.assertTrue(response.containsHeader(HttpHeader.TRAILER.toString()));
Assert.assertEquals(response.getHeader(HttpHeader.TRAILER.toString()), QueryResultPusher.RESULT_TRAILER_HEADERS);
final HttpFields fields = response.getTrailers().get();
Assert.assertTrue(fields.containsKey(QueryResource.ERROR_MESSAGE_TRAILER_HEADER));
- Assert.assertEquals(fields.get(QueryResource.ERROR_MESSAGE_TRAILER_HEADER),
- "Query did not complete within configured timeout period. You can increase query timeout or tune the performance of query.");
+ Assert.assertEquals(
+ fields.get(QueryResource.ERROR_MESSAGE_TRAILER_HEADER),
+ "Query did not complete within configured timeout period. You can increase query timeout or tune the performance of query."
+ );
Assert.assertTrue(fields.containsKey(QueryResource.RESPONSE_COMPLETE_TRAILER_HEADER));
Assert.assertEquals(fields.get(QueryResource.RESPONSE_COMPLETE_TRAILER_HEADER), "false");
@@ -499,7 +511,8 @@ public void testSuccessResponseWithTrailerHeader() throws IOException
Assert.assertNull(queryResource.doPost(new ByteArrayInputStream(
SIMPLE_TIMESERIES_QUERY.getBytes(StandardCharsets.UTF_8)),
null /*pretty*/,
- testServletRequest));
+ testServletRequest
+ ));
Assert.assertTrue(response.containsHeader(HttpHeader.TRAILER.toString()));
final HttpFields fields = response.getTrailers().get();
@@ -623,7 +636,9 @@ public void testGoodQueryWithQueryConfigDoesNotOverrideQueryContext() throws IOE
final List> responses = jsonMapper.readValue(
response.baos.toByteArray(),
- new TypeReference<>() {}
+ new TypeReference<>()
+ {
+ }
);
Assert.assertNotNull(response);
@@ -822,7 +837,7 @@ public Authorizer getAuthorizer(String name)
public Access authorize(AuthenticationResult authenticationResult, Resource resource, Action action)
{
if (resource.getName().equals("allow")) {
- return new Access(true);
+ return Access.allowWithRestriction(RowFilterPolicy.from(new NullFilter("col", null)));
} else {
return new Access(false);
}
@@ -872,7 +887,9 @@ public Access authorize(AuthenticationResult authenticationResult, Resource reso
final List> responses = jsonMapper.readValue(
response.baos.toByteArray(),
- new TypeReference<>() {}
+ new TypeReference<>()
+ {
+ }
);
Assert.assertEquals(0, responses.size());
diff --git a/server/src/test/java/org/apache/druid/server/security/ForbiddenExceptionTest.java b/server/src/test/java/org/apache/druid/server/security/ForbiddenExceptionTest.java
index dc3bc9144485..4bc395634058 100644
--- a/server/src/test/java/org/apache/druid/server/security/ForbiddenExceptionTest.java
+++ b/server/src/test/java/org/apache/druid/server/security/ForbiddenExceptionTest.java
@@ -19,6 +19,7 @@
package org.apache.druid.server.security;
+import org.apache.druid.query.policy.NoRestrictionPolicy;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
@@ -53,7 +54,8 @@ public void testSanitizeWithTransformFunctionReturningNull()
@Test
public void testSanitizeWithTransformFunctionReturningNewString()
{
- Mockito.when(trasformFunction.apply(ArgumentMatchers.eq(ERROR_MESSAGE_ORIGINAL))).thenReturn(ERROR_MESSAGE_TRANSFORMED);
+ Mockito.when(trasformFunction.apply(ArgumentMatchers.eq(ERROR_MESSAGE_ORIGINAL)))
+ .thenReturn(ERROR_MESSAGE_TRANSFORMED);
ForbiddenException forbiddenException = new ForbiddenException(ERROR_MESSAGE_ORIGINAL);
ForbiddenException actual = forbiddenException.sanitize(trasformFunction);
Assert.assertNotNull(actual);
@@ -66,22 +68,24 @@ public void testSanitizeWithTransformFunctionReturningNewString()
@Test
public void testAccess()
{
- Access access = new Access(false);
+ Access access = Access.deny(null);
Assert.assertFalse(access.isAllowed());
- Assert.assertEquals("", access.getMessage());
- Assert.assertEquals("Allowed:false, Message:", access.toString());
- Assert.assertEquals(Access.DEFAULT_ERROR_MESSAGE, access.toMessage());
+ Assert.assertEquals("Allowed:false, Message:, Policy: null", access.toString());
+ Assert.assertEquals(Access.DEFAULT_ERROR_MESSAGE, access.getMessage());
- access = new Access(true);
+ access = Access.deny("oops");
+ Assert.assertFalse(access.isAllowed());
+ Assert.assertEquals("Allowed:false, Message:oops, Policy: null", access.toString());
+ Assert.assertEquals("Unauthorized, oops", access.getMessage());
+
+ access = Access.allow();
Assert.assertTrue(access.isAllowed());
- Assert.assertEquals("", access.getMessage());
- Assert.assertEquals("Allowed:true, Message:", access.toString());
- Assert.assertEquals("Authorized", access.toMessage());
+ Assert.assertEquals("Allowed:true, Message:, Policy: Optional.empty", access.toString());
+ Assert.assertEquals("Authorized", access.getMessage());
- access = new Access(false, "oops");
- Assert.assertFalse(access.isAllowed());
- Assert.assertEquals("oops", access.getMessage());
- Assert.assertEquals("Allowed:false, Message:oops", access.toString());
- Assert.assertEquals("Allowed:false, Message:oops", access.toMessage());
+ access = Access.allowWithRestriction(NoRestrictionPolicy.instance());
+ Assert.assertTrue(access.isAllowed());
+ Assert.assertEquals("Allowed:true, Message:, Policy: Optional[NO_RESTRICTION]", access.toString());
+ Assert.assertEquals("Authorized, with restriction [NO_RESTRICTION]", access.getMessage());
}
}
diff --git a/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java b/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java
index bfa95c5d5562..ee5cb290e9a5 100644
--- a/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java
+++ b/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java
@@ -21,8 +21,8 @@
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.query.QueryContexts;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.ForbiddenException;
import org.apache.druid.server.security.Resource;
@@ -36,6 +36,7 @@
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
+import java.util.Objects;
import java.util.Set;
import java.util.UUID;
import java.util.function.Function;
@@ -137,7 +138,7 @@ protected void validate(final DruidPlanner planner)
*/
protected void authorize(
final DruidPlanner planner,
- final Function, Access> authorizer
+ final Function, AuthorizationResult> authorizer
)
{
Set securedKeys = this.sqlToolbox.plannerFactory.getAuthConfig()
@@ -150,16 +151,15 @@ protected void authorize(
// Authentication is done by the planner using the function provided
// here. The planner ensures that this step is done before planning.
authResult = planner.authorize(authorizer, contextResources);
- if (!authResult.authorizationResult.isAllowed()) {
- throw new ForbiddenException(authResult.authorizationResult.toMessage());
+ if (!authResult.authorizationResult.allowBasicAccess()) {
+ throw new ForbiddenException(authResult.authorizationResult.getErrorMessage());
}
}
/**
- * Resource authorizer based on the authentication result
- * provided earlier.
+ * Returns an authorizer that can provide authorization result given a set of required resource actions and authentication result.
*/
- protected Function, Access> authorizer()
+ protected Function, AuthorizationResult> authorizer()
{
return resourceActions ->
AuthorizationUtils.authorizeAllResourceActions(
@@ -175,12 +175,12 @@ protected Function, Access> authorizer()
*/
public Set resources()
{
- return authResult.sqlResourceActions;
+ return Objects.requireNonNull(authResult.sqlResourceActions);
}
public Set allResources()
{
- return authResult.allResourceActions;
+ return Objects.requireNonNull(authResult.allResourceActions);
}
public SqlQueryPlus query()
diff --git a/sql/src/main/java/org/apache/druid/sql/HttpStatement.java b/sql/src/main/java/org/apache/druid/sql/HttpStatement.java
index 52bef0a04f07..d02f8c6b444d 100644
--- a/sql/src/main/java/org/apache/druid/sql/HttpStatement.java
+++ b/sql/src/main/java/org/apache/druid/sql/HttpStatement.java
@@ -19,7 +19,7 @@
package org.apache.druid.sql;
-import org.apache.druid.server.security.Access;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.ResourceAction;
import org.apache.druid.sql.http.SqlQuery;
@@ -51,21 +51,21 @@ public HttpStatement(
super(
lifecycleToolbox,
SqlQueryPlus.builder(sqlQuery)
- .auth(AuthorizationUtils.authenticationResultFromRequest(req))
- .build(),
+ .auth(AuthorizationUtils.authenticationResultFromRequest(req))
+ .build(),
req.getRemoteAddr()
);
this.req = req;
}
@Override
- protected Function, Access> authorizer()
+ protected Function, AuthorizationResult> authorizer()
{
return resourceActions ->
- AuthorizationUtils.authorizeAllResourceActions(
- req,
- resourceActions,
- sqlToolbox.plannerFactory.getAuthorizerMapper()
- );
+ AuthorizationUtils.authorizeAllResourceActions(
+ req,
+ resourceActions,
+ sqlToolbox.plannerFactory.getAuthorizerMapper()
+ );
}
}
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java
index 03ef94656c5f..07a632f9c82d 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java
@@ -35,7 +35,7 @@
import org.apache.druid.error.DruidException;
import org.apache.druid.error.InvalidSqlInput;
import org.apache.druid.query.QueryContext;
-import org.apache.druid.server.security.Access;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.Resource;
import org.apache.druid.server.security.ResourceAction;
import org.apache.druid.sql.calcite.parser.DruidSqlInsert;
@@ -74,7 +74,7 @@ public enum State
public static class AuthResult
{
- public final Access authorizationResult;
+ public final AuthorizationResult authorizationResult;
/**
* Resource actions used with authorizing a cancellation request. These actions
@@ -90,7 +90,7 @@ public static class AuthResult
public final Set allResourceActions;
public AuthResult(
- final Access authorizationResult,
+ final AuthorizationResult authorizationResult,
final Set sqlResourceActions,
final Set allResourceActions
)
@@ -183,7 +183,7 @@ private SqlStatementHandler createHandler(final SqlNode node)
/**
* Uses {@link SqlParameterizerShuttle} to rewrite {@link SqlNode} to swap out any
* {@link org.apache.calcite.sql.SqlDynamicParam} early for their {@link org.apache.calcite.sql.SqlLiteral}
- * replacement.
+ * replacement.
*
* @return a rewritten {@link SqlNode} with any dynamic parameters rewritten in the provided {@code original} node,
* if they were present.
@@ -226,14 +226,14 @@ public PrepareResult prepare()
* Authorizes the statement. Done within the planner to enforce the authorization
* step within the planner's state machine.
*
- * @param authorizer a function from resource actions to a {@link Access} result.
+ * @param authorizer a function produces {@link AuthorizationResult} based on resource actions.
* @param extraActions set of additional resource actions beyond those inferred
* from the query itself. Specifically, the set of context keys to
* authorize.
* @return the return value from the authorizer
*/
public AuthResult authorize(
- final Function, Access> authorizer,
+ final Function, AuthorizationResult> authorizer,
final Set extraActions
)
{
@@ -241,14 +241,14 @@ public AuthResult authorize(
Set sqlResourceActions = plannerContext.getResourceActions();
Set allResourceActions = new HashSet<>(sqlResourceActions);
allResourceActions.addAll(extraActions);
- Access access = authorizer.apply(allResourceActions);
- plannerContext.setAuthorizationResult(access);
+ AuthorizationResult authorizationResult = authorizer.apply(allResourceActions);
+ plannerContext.setAuthorizationResult(authorizationResult);
// Authorization is done as a flag, not a state, alas.
// Views prepare without authorization, Avatica does authorize, then prepare,
// so the only constraint is that authorization be done before planning.
authorized = true;
- return new AuthResult(access, sqlResourceActions, allResourceActions);
+ return new AuthResult(authorizationResult, sqlResourceActions, allResourceActions);
}
/**
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java
index c6e600d8a757..46e55c24c8bc 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java
@@ -47,8 +47,8 @@
import org.apache.druid.query.lookup.RegisteredLookupExtractionFn;
import org.apache.druid.segment.join.JoinableFactoryWrapper;
import org.apache.druid.server.lookup.cache.LookupLoadingSpec;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.ResourceAction;
import org.apache.druid.sql.calcite.expression.SqlOperatorConversion;
import org.apache.druid.sql.calcite.expression.builtin.QueryLookupOperatorConversion;
@@ -146,7 +146,7 @@ public class PlannerContext
// set of datasources and views which must be authorized, initialized to null so we can detect if it has been set.
private Set resourceActions;
// result of authorizing set of resources against authentication identity
- private Access authorizationResult;
+ private AuthorizationResult authorizationResult;
// error messages encountered while planning the query
@Nullable
private String planningError;
@@ -574,7 +574,7 @@ public Object get(final String name)
}
- public Access getAuthorizationResult()
+ public AuthorizationResult getAuthorizationResult()
{
return Preconditions.checkNotNull(authorizationResult, "Authorization result not available");
}
@@ -594,7 +594,7 @@ public void setAuthenticationResult(AuthenticationResult authenticationResult)
this.authenticationResult = Preconditions.checkNotNull(authenticationResult, "authenticationResult");
}
- public void setAuthorizationResult(Access access)
+ public void setAuthorizationResult(AuthorizationResult access)
{
if (this.authorizationResult != null) {
// It's a bug if this happens, because setAuthorizationResult should be called exactly once.
@@ -636,7 +636,7 @@ public SqlEngine getEngine()
/**
* Checks if the current {@link SqlEngine} supports a particular feature.
- *
+ *
* When executing a specific query, use this method instead of {@link SqlEngine#featureAvailable(EngineFeature)}
* because it also verifies feature flags.
*/
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java
index c21f6408b52f..3e69d275471f 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java
@@ -41,8 +41,8 @@
import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.segment.join.JoinableFactoryWrapper;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.AuthConfig;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.NoopEscalator;
import org.apache.druid.sql.calcite.parser.DruidSqlParserImplFactory;
@@ -127,13 +127,17 @@ public DruidPlanner createPlanner(
* and ready to go authorization result.
*/
@VisibleForTesting
- public DruidPlanner createPlannerForTesting(final SqlEngine engine, final String sql, final Map queryContext)
+ public DruidPlanner createPlannerForTesting(
+ final SqlEngine engine,
+ final String sql,
+ final Map queryContext
+ )
{
final DruidPlanner thePlanner = createPlanner(engine, sql, queryContext, null);
thePlanner.getPlannerContext()
.setAuthenticationResult(NoopEscalator.getInstance().createEscalatedAuthenticationResult());
thePlanner.validate();
- thePlanner.authorize(ra -> Access.OK, ImmutableSet.of());
+ thePlanner.authorize(ra -> AuthorizationResult.ALLOW_NO_RESTRICTION, ImmutableSet.of());
return thePlanner;
}
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMaker.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMaker.java
index 7b1e1ec7091d..836404319c95 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMaker.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMaker.java
@@ -45,8 +45,8 @@
import org.apache.druid.server.QueryLifecycle;
import org.apache.druid.server.QueryLifecycleFactory;
import org.apache.druid.server.QueryResponse;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.sql.calcite.planner.PlannerConfig;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.CannotBuildQueryException;
@@ -185,14 +185,18 @@ private QueryResponse execute(
query = query.withSqlQueryId(plannerContext.getSqlQueryId());
final AuthenticationResult authenticationResult = plannerContext.getAuthenticationResult();
- final Access authorizationResult = plannerContext.getAuthorizationResult();
+ final AuthorizationResult authorizationResult = plannerContext.getAuthorizationResult();
final QueryLifecycle queryLifecycle = queryLifecycleFactory.factorize();
// After calling "runSimple" the query will start running. We need to do this before reading the toolChest, since
// otherwise it won't yet be initialized. (A bummer, since ideally, we'd verify the toolChest exists and can do
// array-based results before starting the query; but in practice we don't expect this to happen since we keep
// tight control over which query types we generate in the SQL layer. They all support array-based results.)
- final QueryResponse results = queryLifecycle.runSimple((Query) query, authenticationResult, authorizationResult);
+ final QueryResponse results = queryLifecycle.runSimple(
+ (Query) query,
+ authenticationResult,
+ authorizationResult
+ );
return mapResultSequence(
results,
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java
index 37f8c50b4733..5794fb6dd84c 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java
@@ -73,9 +73,9 @@
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.metadata.AvailableSegmentMetadata;
import org.apache.druid.server.DruidNode;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
@@ -244,11 +244,22 @@ public SystemSchema(
{
Preconditions.checkNotNull(serverView, "serverView");
this.tableMap = ImmutableMap.of(
- SEGMENTS_TABLE, new SegmentsTable(druidSchema, metadataView, jsonMapper, authorizerMapper),
- SERVERS_TABLE, new ServersTable(druidNodeDiscoveryProvider, serverInventoryView, authorizerMapper, overlordClient, coordinatorDruidLeaderClient),
- SERVER_SEGMENTS_TABLE, new ServerSegmentsTable(serverView, authorizerMapper),
- TASKS_TABLE, new TasksTable(overlordClient, authorizerMapper),
- SUPERVISOR_TABLE, new SupervisorsTable(overlordClient, authorizerMapper)
+ SEGMENTS_TABLE,
+ new SegmentsTable(druidSchema, metadataView, jsonMapper, authorizerMapper),
+ SERVERS_TABLE,
+ new ServersTable(
+ druidNodeDiscoveryProvider,
+ serverInventoryView,
+ authorizerMapper,
+ overlordClient,
+ coordinatorDruidLeaderClient
+ ),
+ SERVER_SEGMENTS_TABLE,
+ new ServerSegmentsTable(serverView, authorizerMapper),
+ TASKS_TABLE,
+ new TasksTable(overlordClient, authorizerMapper),
+ SUPERVISOR_TABLE,
+ new SupervisorsTable(overlordClient, authorizerMapper)
);
}
@@ -1135,19 +1146,20 @@ private static void checkStateReadAccessForServers(
AuthorizerMapper authorizerMapper
)
{
- final Access stateAccess = AuthorizationUtils.authorizeAllResourceActions(
+ final AuthorizationResult authResult = AuthorizationUtils.authorizeAllResourceActions(
authenticationResult,
Collections.singletonList(new ResourceAction(Resource.STATE_RESOURCE, Action.READ)),
authorizerMapper
);
- if (!stateAccess.isAllowed()) {
- throw new ForbiddenException("Insufficient permission to view servers: " + stateAccess.toMessage());
+
+ if (!authResult.allowAccessWithNoRestriction()) {
+ throw new ForbiddenException("Insufficient permission to view servers: " + authResult.getErrorMessage());
}
}
/**
* Project a row using "projects" from {@link SegmentsTable#scan(DataContext, List, int[])}.
- *
+ *
* Also, fix up types so {@link ColumnType#STRING} are transformed to Strings if they aren't yet. This defers
* computation of {@link ObjectMapper#writeValueAsString(Object)} or {@link Object#toString()} until we know we
* actually need it.
diff --git a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java
index d957e7155b5e..b53b49f0fcaa 100644
--- a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java
+++ b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java
@@ -33,7 +33,7 @@
import org.apache.druid.server.QueryResultPusher;
import org.apache.druid.server.ResponseContextConfig;
import org.apache.druid.server.initialization.ServerConfig;
-import org.apache.druid.server.security.Access;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ResourceAction;
@@ -140,9 +140,9 @@ public Response cancelQuery(
return Response.status(Status.NOT_FOUND).build();
}
- final Access access = authorizeCancellation(req, lifecycles);
+ final AuthorizationResult authResult = authorizeCancellation(req, lifecycles);
- if (access.isAllowed()) {
+ if (authResult.allowAccessWithNoRestriction()) {
// should remove only the lifecycles in the snapshot.
sqlLifecycleManager.removeAll(sqlQueryId, lifecycles);
lifecycles.forEach(Cancelable::cancel);
@@ -332,11 +332,11 @@ public void writeException(Exception ex, OutputStream out) throws IOException
/**
* Authorize a query cancellation operation.
- *
+ *
* Considers only datasource and table resources; not context key resources when checking permissions. This means
* that a user's permission to cancel a query depends on the datasource, not the context variables used in the query.
*/
- public Access authorizeCancellation(final HttpServletRequest req, final List cancelables)
+ public AuthorizationResult authorizeCancellation(final HttpServletRequest req, final List cancelables)
{
Set resources = cancelables
.stream()
diff --git a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java
index 86cc2e4a6522..50f05fd98fe9 100644
--- a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java
+++ b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java
@@ -559,6 +559,12 @@ public void testDatabaseMetaDataTables() throws SQLException
Pair.of("TABLE_SCHEM", "druid"),
Pair.of("TABLE_TYPE", "TABLE")
),
+ row(
+ Pair.of("TABLE_CAT", "druid"),
+ Pair.of("TABLE_NAME", CalciteTests.RESTRICTED_DATASOURCE),
+ Pair.of("TABLE_SCHEM", "druid"),
+ Pair.of("TABLE_TYPE", "TABLE")
+ ),
row(
Pair.of("TABLE_CAT", "druid"),
Pair.of("TABLE_NAME", CalciteTests.SOME_DATASOURCE),
@@ -651,6 +657,12 @@ public void testDatabaseMetaDataTablesAsSuperuser() throws SQLException
Pair.of("TABLE_SCHEM", "druid"),
Pair.of("TABLE_TYPE", "TABLE")
),
+ row(
+ Pair.of("TABLE_CAT", "druid"),
+ Pair.of("TABLE_NAME", CalciteTests.RESTRICTED_DATASOURCE),
+ Pair.of("TABLE_SCHEM", "druid"),
+ Pair.of("TABLE_TYPE", "TABLE")
+ ),
row(
Pair.of("TABLE_CAT", "druid"),
Pair.of("TABLE_NAME", CalciteTests.SOME_DATASOURCE),
diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java
index 2784f929df5a..6e8b0434bece 100644
--- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java
+++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java
@@ -82,7 +82,7 @@
import org.apache.druid.segment.join.JoinType;
import org.apache.druid.segment.virtual.ListFilteredVirtualColumn;
import org.apache.druid.server.QueryLifecycle;
-import org.apache.druid.server.security.Access;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.sql.calcite.DecoupledTestConfig.IgnoreQueriesReason;
import org.apache.druid.sql.calcite.DecoupledTestConfig.QuidemTestCaseReason;
import org.apache.druid.sql.calcite.NotYetSupported.Modes;
@@ -5050,7 +5050,7 @@ public void testGroupByJoinAsNativeQueryWithUnoptimizedFilter(Map results = seq.toList();
Assert.assertEquals(
ImmutableList.of(ResultRow.of("def")),
diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java
index 4323d7d54314..f1c8184e5a45 100644
--- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java
+++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java
@@ -189,6 +189,7 @@ public void testInformationSchemaTables()
.add(new Object[]{"druid", CalciteTests.DATASOURCE4, "TABLE", "NO", "NO"})
.add(new Object[]{"druid", CalciteTests.DATASOURCE5, "TABLE", "NO", "NO"})
.add(new Object[]{"druid", CalciteTests.DATASOURCE3, "TABLE", "NO", "NO"})
+ .add(new Object[]{"druid", CalciteTests.RESTRICTED_DATASOURCE, "TABLE", "NO", "NO"})
.add(new Object[]{"druid", CalciteTests.SOME_DATASOURCE, "TABLE", "NO", "NO"})
.add(new Object[]{"druid", CalciteTests.SOMEXDATASOURCE, "TABLE", "NO", "NO"})
.add(new Object[]{"druid", CalciteTests.USERVISITDATASOURCE, "TABLE", "NO", "NO"})
@@ -231,6 +232,7 @@ public void testInformationSchemaTables()
.add(new Object[]{"druid", CalciteTests.FORBIDDEN_DATASOURCE, "TABLE", "NO", "NO"})
.add(new Object[]{"druid", CalciteTests.DATASOURCE5, "TABLE", "NO", "NO"})
.add(new Object[]{"druid", CalciteTests.DATASOURCE3, "TABLE", "NO", "NO"})
+ .add(new Object[]{"druid", CalciteTests.RESTRICTED_DATASOURCE, "TABLE", "NO", "NO"})
.add(new Object[]{"druid", CalciteTests.SOME_DATASOURCE, "TABLE", "NO", "NO"})
.add(new Object[]{"druid", CalciteTests.SOMEXDATASOURCE, "TABLE", "NO", "NO"})
.add(new Object[]{"druid", CalciteTests.USERVISITDATASOURCE, "TABLE", "NO", "NO"})
diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java
index c251eb212320..871ecd211ee7 100644
--- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java
+++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java
@@ -1156,6 +1156,46 @@ public void testSelectStar()
);
}
+ @Test
+ public void testCountRestrictedTable_shouldFilterOnPolicy()
+ {
+ testQuery(
+ PLANNER_CONFIG_DEFAULT,
+ "SELECT COUNT(*) FROM druid.restrictedDatasource_m1_is_6",
+ CalciteTests.SUPER_USER_AUTH_RESULT,
+ ImmutableList.of(
+ Druids.newTimeseriesQueryBuilder()
+ .dataSource(CalciteTests.RESTRICTED_DATASOURCE)
+ .intervals(querySegmentSpec(Filtration.eternity()))
+ .granularity(Granularities.ALL)
+ .aggregators(aggregators(new CountAggregatorFactory("a0")))
+ .context(QUERY_CONTEXT_DEFAULT)
+ .build()
+ ),
+ ImmutableList.of(
+ new Object[]{6L} // superuser can see all records
+ )
+ );
+
+ testQuery(
+ PLANNER_CONFIG_DEFAULT,
+ "SELECT COUNT(*) FROM druid.restrictedDatasource_m1_is_6",
+ CalciteTests.REGULAR_USER_AUTH_RESULT,
+ ImmutableList.of(
+ Druids.newTimeseriesQueryBuilder()
+ .dataSource(CalciteTests.RESTRICTED_DATASOURCE)
+ .intervals(querySegmentSpec(Filtration.eternity()))
+ .granularity(Granularities.ALL)
+ .aggregators(aggregators(new CountAggregatorFactory("a0")))
+ .context(QUERY_CONTEXT_DEFAULT)
+ .build()
+ ),
+ ImmutableList.of(
+ new Object[]{1L} // regular user can only see 1 record based on the policy
+ )
+ );
+ }
+
@Test
public void testSelectStarOnForbiddenTable()
{
diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/BrokerSegmentMetadataCacheTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/BrokerSegmentMetadataCacheTest.java
index b613c602f633..3aef6d84061f 100644
--- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/BrokerSegmentMetadataCacheTest.java
+++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/BrokerSegmentMetadataCacheTest.java
@@ -69,8 +69,8 @@
import org.apache.druid.server.coordination.DruidServerMetadata;
import org.apache.druid.server.coordination.ServerType;
import org.apache.druid.server.metrics.NoopServiceEmitter;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.AllowAllAuthenticator;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.NoopEscalator;
import org.apache.druid.sql.calcite.table.DatasourceTable;
import org.apache.druid.sql.calcite.table.DruidTable;
@@ -141,7 +141,10 @@ public BrokerSegmentMetadataCache buildSchemaMarkAndTableLatch() throws Interrup
return buildSchemaMarkAndTableLatch(SEGMENT_CACHE_CONFIG_DEFAULT, new NoopCoordinatorClient());
}
- public BrokerSegmentMetadataCache buildSchemaMarkAndTableLatch(BrokerSegmentMetadataCacheConfig config, CoordinatorClient coordinatorClient) throws InterruptedException
+ public BrokerSegmentMetadataCache buildSchemaMarkAndTableLatch(
+ BrokerSegmentMetadataCacheConfig config,
+ CoordinatorClient coordinatorClient
+ ) throws InterruptedException
{
Preconditions.checkState(runningSchema == null);
runningSchema = new BrokerSegmentMetadataCache(
@@ -203,7 +206,8 @@ public void markDataSourceAsNeedRebuild(String datasource)
@VisibleForTesting
public void refresh(
final Set segmentsToRefresh,
- final Set dataSourcesToRebuild) throws IOException
+ final Set dataSourcesToRebuild
+ ) throws IOException
{
super.refresh(segmentsToRefresh, dataSourcesToRebuild);
refreshLatch.countDown();
@@ -226,14 +230,18 @@ public void testCoordinatorReturnsAllDSSchema() throws InterruptedException
final RowSignature someDataSourceRowSignature = new QueryableIndexCursorFactory(indexAuto1).getRowSignature();
final RowSignature foo3RowSignature = new QueryableIndexCursorFactory(indexAuto2).getRowSignature();
- NoopCoordinatorClient coordinatorClient = new NoopCoordinatorClient() {
+ NoopCoordinatorClient coordinatorClient = new NoopCoordinatorClient()
+ {
@Override
public ListenableFuture> fetchDataSourceInformation(Set datasources)
{
Map dataSourceInformationMap = new HashMap<>();
dataSourceInformationMap.put(DATASOURCE1, new DataSourceInformation(DATASOURCE1, dataSource1RowSignature));
dataSourceInformationMap.put(DATASOURCE2, new DataSourceInformation(DATASOURCE2, dataSource2RowSignature));
- dataSourceInformationMap.put(SOME_DATASOURCE, new DataSourceInformation(SOME_DATASOURCE, someDataSourceRowSignature));
+ dataSourceInformationMap.put(
+ SOME_DATASOURCE,
+ new DataSourceInformation(SOME_DATASOURCE, someDataSourceRowSignature)
+ );
dataSourceInformationMap.put("foo3", new DataSourceInformation("foo3", foo3RowSignature));
return Futures.immediateFuture(new ArrayList<>(dataSourceInformationMap.values()));
@@ -258,7 +266,12 @@ public ListenableFuture> fetchDataSourceInformation(
schema.start();
schema.awaitInitialization();
final Set tableNames = schema.getDatasourceNames();
- Assert.assertEquals(ImmutableSet.of(CalciteTests.DATASOURCE1, CalciteTests.DATASOURCE2, CalciteTests.SOME_DATASOURCE, "foo3"), tableNames);
+ Assert.assertEquals(ImmutableSet.of(
+ CalciteTests.DATASOURCE1,
+ CalciteTests.DATASOURCE2,
+ CalciteTests.SOME_DATASOURCE,
+ "foo3"
+ ), tableNames);
Assert.assertEquals(dataSource1RowSignature, schema.getDatasource(DATASOURCE1).getRowSignature());
Assert.assertEquals(dataSource2RowSignature, schema.getDatasource(DATASOURCE2).getRowSignature());
@@ -277,14 +290,18 @@ public void testCoordinatorReturnsFewDSSchema() throws InterruptedException
final RowSignature dataSource2RowSignature = new QueryableIndexCursorFactory(index2).getRowSignature();
final RowSignature someDataSourceRowSignature = new QueryableIndexCursorFactory(indexAuto1).getRowSignature();
- NoopCoordinatorClient coordinatorClient = new NoopCoordinatorClient() {
+ NoopCoordinatorClient coordinatorClient = new NoopCoordinatorClient()
+ {
@Override
public ListenableFuture> fetchDataSourceInformation(Set datasources)
{
Map dataSourceInformationMap = new HashMap<>();
dataSourceInformationMap.put(DATASOURCE1, new DataSourceInformation(DATASOURCE1, dataSource1RowSignature));
dataSourceInformationMap.put(DATASOURCE2, new DataSourceInformation(DATASOURCE2, dataSource2RowSignature));
- dataSourceInformationMap.put(SOME_DATASOURCE, new DataSourceInformation(SOME_DATASOURCE, someDataSourceRowSignature));
+ dataSourceInformationMap.put(
+ SOME_DATASOURCE,
+ new DataSourceInformation(SOME_DATASOURCE, someDataSourceRowSignature)
+ );
return Futures.immediateFuture(new ArrayList<>(dataSourceInformationMap.values()));
}
};
@@ -304,7 +321,11 @@ public ListenableFuture> fetchDataSourceInformation(
QueryLifecycleFactory factoryMock = EasyMock.createMock(QueryLifecycleFactory.class);
QueryLifecycle lifecycleMock = EasyMock.createMock(QueryLifecycle.class);
EasyMock.expect(factoryMock.factorize()).andReturn(lifecycleMock).once();
- EasyMock.expect(lifecycleMock.runSimple(expectedMetadataQuery, AllowAllAuthenticator.ALLOW_ALL_RESULT, Access.OK))
+ EasyMock.expect(lifecycleMock.runSimple(
+ expectedMetadataQuery,
+ AllowAllAuthenticator.ALLOW_ALL_RESULT,
+ AuthorizationResult.ALLOW_NO_RESTRICTION
+ ))
.andReturn(QueryResponse.withEmptyContext(Sequences.empty()));
BrokerSegmentMetadataCache schema = new BrokerSegmentMetadataCache(
@@ -335,7 +356,8 @@ public void testBrokerPollsAllDSSchema() throws InterruptedException
{
ArgumentCaptor> argumentCaptor = ArgumentCaptor.forClass(Set.class);
CoordinatorClient coordinatorClient = Mockito.mock(CoordinatorClient.class);
- Mockito.when(coordinatorClient.fetchDataSourceInformation(argumentCaptor.capture())).thenReturn(Futures.immediateFuture(null));
+ Mockito.when(coordinatorClient.fetchDataSourceInformation(argumentCaptor.capture()))
+ .thenReturn(Futures.immediateFuture(null));
Set datsources = Sets.newHashSet(DATASOURCE1, DATASOURCE2, DATASOURCE3, SOME_DATASOURCE, "xyz", "coldDS");
Mockito.when(coordinatorClient.fetchDataSourcesWithUsedSegments()).thenReturn(Futures.immediateFuture(datsources));
@@ -386,7 +408,8 @@ public void testRefreshOnEachCycleCentralizedDatasourceSchemaEnabled() throws In
new PhysicalDatasourceMetadataFactory(globalTableJoinable, segmentManager),
new NoopCoordinatorClient(),
config
- ) {
+ )
+ {
@Override
public void refresh(Set segmentsToRefresh, Set dataSourcesToRebuild)
throws IOException
@@ -425,7 +448,8 @@ public void testRefreshOnEachCycleCentralizedDatasourceSchemaDisabled() throws I
new PhysicalDatasourceMetadataFactory(globalTableJoinable, segmentManager),
new NoopCoordinatorClient(),
CentralizedDatasourceSchemaConfig.create()
- ) {
+ )
+ {
@Override
public void refresh(Set segmentsToRefresh, Set dataSourcesToRebuild)
throws IOException
@@ -449,7 +473,11 @@ public void refresh(Set segmentsToRefresh, Set dataSourcesToR
public void testGetTableMap() throws InterruptedException
{
BrokerSegmentMetadataCache schema = buildSchemaMarkAndTableLatch();
- Assert.assertEquals(ImmutableSet.of(CalciteTests.DATASOURCE1, CalciteTests.DATASOURCE2, CalciteTests.SOME_DATASOURCE), schema.getDatasourceNames());
+ Assert.assertEquals(ImmutableSet.of(
+ CalciteTests.DATASOURCE1,
+ CalciteTests.DATASOURCE2,
+ CalciteTests.SOME_DATASOURCE
+ ), schema.getDatasourceNames());
}
@Test
@@ -509,7 +537,8 @@ public void testGetTableMapSomeTable() throws InterruptedException
// using 'newest first' column type merge strategy, the types are expected to be the types defined in the newer
// segment, except for json, which is special handled
BrokerSegmentMetadataCache schema = buildSchemaMarkAndTableLatch(
- new BrokerSegmentMetadataCacheConfig() {
+ new BrokerSegmentMetadataCacheConfig()
+ {
@Override
public AbstractSegmentMetadataCache.ColumnTypeMergePolicy getMetadataColumnTypeMergePolicy()
{
@@ -603,6 +632,7 @@ public void testGetTableMapSomeTableLeastRestrictiveTypeMerge() throws Interrupt
* This tests that {@link AvailableSegmentMetadata#getNumRows()} is correct in case
* of multiple replicas i.e. when {@link AbstractSegmentMetadataCache#addSegment(DruidServerMetadata, DataSegment)}
* is called more than once for same segment
+ *
* @throws InterruptedException
*/
@Test
@@ -720,7 +750,8 @@ public void markDataSourceAsNeedRebuild(String datasource)
@VisibleForTesting
public void refresh(
final Set segmentsToRefresh,
- final Set dataSourcesToRebuild) throws IOException
+ final Set dataSourcesToRebuild
+ ) throws IOException
{
super.refresh(segmentsToRefresh, dataSourcesToRebuild);
}
@@ -731,9 +762,9 @@ public void refresh(
final Map segmentMetadatas = schema.getSegmentMetadataSnapshot();
List segments = segmentMetadatas.values()
- .stream()
- .map(AvailableSegmentMetadata::getSegment)
- .collect(Collectors.toList());
+ .stream()
+ .map(AvailableSegmentMetadata::getSegment)
+ .collect(Collectors.toList());
Assert.assertEquals(6, segments.size());
// verify that dim3 column isn't present in the schema for foo
@@ -769,20 +800,20 @@ public void refresh(
);
QueryableIndex index = IndexBuilder.create()
- .tmpDir(new File(tmpDir, "1"))
- .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
- .schema(
- new IncrementalIndexSchema.Builder()
- .withMetrics(
- new CountAggregatorFactory("cnt"),
- new DoubleSumAggregatorFactory("m1", "m1"),
- new HyperUniquesAggregatorFactory("unique_dim1", "dim1")
- )
- .withRollup(false)
- .build()
- )
- .rows(rows)
- .buildMMappedIndex();
+ .tmpDir(new File(tmpDir, "1"))
+ .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
+ .schema(
+ new IncrementalIndexSchema.Builder()
+ .withMetrics(
+ new CountAggregatorFactory("cnt"),
+ new DoubleSumAggregatorFactory("m1", "m1"),
+ new HyperUniquesAggregatorFactory("unique_dim1", "dim1")
+ )
+ .withRollup(false)
+ .build()
+ )
+ .rows(rows)
+ .buildMMappedIndex();
walker.add(newSegment, index);
serverView.addSegment(newSegment, ServerType.HISTORICAL);
@@ -839,11 +870,11 @@ public void testNullAvailableSegmentMetadata() throws IOException, InterruptedEx
/**
* Test actions on the cache. The current design of the cache makes testing far harder
* than it should be.
- *
+ *
* - The cache is refreshed on a schedule.
* - Datasources are added to the refresh queue via an unsynchronized thread.
* - The refresh loop always refreshes since one of the segments is dynamic.
- *
+ *
* The use of latches tries to keep things synchronized, but there are many
* moving parts. A simpler technique is sorely needed.
*/
@@ -1038,7 +1069,11 @@ public void testRunSegmentMetadataQueryWithContext() throws Exception
EasyMock.expect(factoryMock.factorize()).andReturn(lifecycleMock).once();
// This is the meat of the test, making sure that the query created by the method under test matches the expected query, specifically the operator-configured context
- EasyMock.expect(lifecycleMock.runSimple(expectedMetadataQuery, AllowAllAuthenticator.ALLOW_ALL_RESULT, Access.OK))
+ EasyMock.expect(lifecycleMock.runSimple(
+ expectedMetadataQuery,
+ AllowAllAuthenticator.ALLOW_ALL_RESULT,
+ AuthorizationResult.ALLOW_NO_RESTRICTION
+ ))
.andReturn(QueryResponse.withEmptyContext(Sequences.empty()));
EasyMock.replay(factoryMock, lifecycleMock);
@@ -1130,9 +1165,9 @@ public void testNoDatasourceSchemaWhenNoSegmentMetadata() throws InterruptedExce
schema.awaitInitialization();
List segments = schema.getSegmentMetadataSnapshot().values()
- .stream()
- .map(AvailableSegmentMetadata::getSegment)
- .collect(Collectors.toList());
+ .stream()
+ .map(AvailableSegmentMetadata::getSegment)
+ .collect(Collectors.toList());
schema.refresh(segments.stream().map(DataSegment::getId).collect(Collectors.toSet()), Collections.singleton("foo"));
@@ -1186,9 +1221,9 @@ public void testTombstoneSegmentIsNotRefreshed() throws IOException
.build();
final ImmutableDruidServer historicalServer = druidServers.stream()
- .filter(s -> s.getType().equals(ServerType.HISTORICAL))
- .findAny()
- .orElse(null);
+ .filter(s -> s.getType().equals(ServerType.HISTORICAL))
+ .findAny()
+ .orElse(null);
Assert.assertNotNull(historicalServer);
final DruidServerMetadata historicalServerMetadata = historicalServer.getMetadata();
@@ -1217,7 +1252,11 @@ public void testTombstoneSegmentIsNotRefreshed() throws IOException
);
EasyMock.expect(factoryMock.factorize()).andReturn(lifecycleMock).once();
- EasyMock.expect(lifecycleMock.runSimple(expectedMetadataQuery, AllowAllAuthenticator.ALLOW_ALL_RESULT, Access.OK))
+ EasyMock.expect(lifecycleMock.runSimple(
+ expectedMetadataQuery,
+ AllowAllAuthenticator.ALLOW_ALL_RESULT,
+ AuthorizationResult.ALLOW_NO_RESTRICTION
+ ))
.andReturn(QueryResponse.withEmptyContext(Sequences.empty()));
EasyMock.replay(factoryMock, lifecycleMock);
diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java
index 14f5729a747c..25396de322a8 100644
--- a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java
+++ b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java
@@ -52,8 +52,12 @@
import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.query.QueryRunnerFactoryConglomerate;
import org.apache.druid.query.QuerySegmentWalker;
+import org.apache.druid.query.policy.NoRestrictionPolicy;
+import org.apache.druid.query.policy.Policy;
+import org.apache.druid.query.policy.RowFilterPolicy;
import org.apache.druid.rpc.indexing.NoopOverlordClient;
import org.apache.druid.rpc.indexing.OverlordClient;
+import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.join.JoinableFactory;
import org.apache.druid.segment.join.JoinableFactoryWrapper;
import org.apache.druid.server.DruidNode;
@@ -75,6 +79,7 @@
import org.apache.druid.server.security.NoopEscalator;
import org.apache.druid.server.security.ResourceType;
import org.apache.druid.sql.SqlStatementFactory;
+import org.apache.druid.sql.calcite.BaseCalciteQueryTest;
import org.apache.druid.sql.calcite.aggregation.SqlAggregationModule;
import org.apache.druid.sql.calcite.planner.DruidOperatorTable;
import org.apache.druid.sql.calcite.planner.PlannerConfig;
@@ -117,6 +122,7 @@ public class CalciteTests
public static final String ARRAYS_DATASOURCE = "arrays";
public static final String BROADCAST_DATASOURCE = "broadcast";
public static final String FORBIDDEN_DATASOURCE = "forbiddenDatasource";
+ public static final String RESTRICTED_DATASOURCE = "restrictedDatasource_m1_is_6";
public static final String FORBIDDEN_DESTINATION = "forbiddenDestination";
public static final String SOME_DATASOURCE = "some_datasource";
public static final String SOME_DATSOURCE_ESCAPED = "some\\_datasource";
@@ -133,26 +139,31 @@ public class CalciteTests
public static final String BENCHMARK_DATASOURCE = "benchmark_ds";
public static final String TEST_SUPERUSER_NAME = "testSuperuser";
+ public static final Policy POLICY_NO_RESTRICTION_SUPERUSER = NoRestrictionPolicy.instance();
+ public static final Policy POLICY_RESTRICTION = RowFilterPolicy.from(BaseCalciteQueryTest.equality("m1", 6, ColumnType.LONG));
public static final AuthorizerMapper TEST_AUTHORIZER_MAPPER = new AuthorizerMapper(null)
{
@Override
public Authorizer getAuthorizer(String name)
{
return (authenticationResult, resource, action) -> {
+ boolean isRestrictedTable = resource.getName().equals(RESTRICTED_DATASOURCE);
+
if (TEST_SUPERUSER_NAME.equals(authenticationResult.getIdentity())) {
- return Access.OK;
+ return isRestrictedTable ? Access.allowWithRestriction(POLICY_NO_RESTRICTION_SUPERUSER) : Access.OK;
}
switch (resource.getType()) {
case ResourceType.DATASOURCE:
- if (FORBIDDEN_DATASOURCE.equals(resource.getName())) {
- return new Access(false);
- } else {
- return Access.OK;
+ switch (resource.getName()) {
+ case FORBIDDEN_DATASOURCE:
+ return Access.DENIED;
+ default:
+ return isRestrictedTable ? Access.allowWithRestriction(POLICY_RESTRICTION) : Access.OK;
}
case ResourceType.VIEW:
if ("forbiddenView".equals(resource.getName())) {
- return new Access(false);
+ return Access.DENIED;
} else {
return Access.OK;
}
@@ -161,14 +172,14 @@ public Authorizer getAuthorizer(String name)
case ResourceType.EXTERNAL:
if (Action.WRITE.equals(action)) {
if (FORBIDDEN_DESTINATION.equals(resource.getName())) {
- return new Access(false);
+ return Access.DENIED;
} else {
return Access.OK;
}
}
- return new Access(false);
+ return Access.DENIED;
default:
- return new Access(false);
+ return Access.DENIED;
}
};
}
@@ -180,20 +191,22 @@ public Authorizer getAuthorizer(String name)
public Authorizer getAuthorizer(String name)
{
return (authenticationResult, resource, action) -> {
+ boolean isRestrictedTable = resource.getName().equals(RESTRICTED_DATASOURCE);
+
if (TEST_SUPERUSER_NAME.equals(authenticationResult.getIdentity())) {
- return Access.OK;
+ return isRestrictedTable ? Access.allowWithRestriction(POLICY_NO_RESTRICTION_SUPERUSER) : Access.OK;
}
switch (resource.getType()) {
case ResourceType.DATASOURCE:
if (FORBIDDEN_DATASOURCE.equals(resource.getName())) {
- return new Access(false);
+ return Access.DENIED;
} else {
- return Access.OK;
+ return isRestrictedTable ? Access.allowWithRestriction(POLICY_RESTRICTION) : Access.OK;
}
case ResourceType.VIEW:
if ("forbiddenView".equals(resource.getName())) {
- return new Access(false);
+ return Access.DENIED;
} else {
return Access.OK;
}
@@ -201,7 +214,7 @@ public Authorizer getAuthorizer(String name)
case ResourceType.EXTERNAL:
return Access.OK;
default:
- return new Access(false);
+ return Access.DENIED;
}
};
}
@@ -254,10 +267,10 @@ public AuthenticationResult createEscalatedAuthenticationResult()
);
public static final Injector INJECTOR = QueryStackTests.defaultInjectorBuilder()
- .addModule(new LookylooModule())
- .addModule(new SqlAggregationModule())
- .addModule(new CalciteTestOperatorModule())
- .build();
+ .addModule(new LookylooModule())
+ .addModule(new SqlAggregationModule())
+ .addModule(new CalciteTestOperatorModule())
+ .build();
private CalciteTests()
{
@@ -398,7 +411,8 @@ public static SystemSchema createMockSystemSchema(
provider,
NodeRole.COORDINATOR,
"/simple/leader"
- ) {
+ )
+ {
@Override
public String findCurrentLeader()
{
@@ -406,7 +420,8 @@ public String findCurrentLeader()
}
};
- final OverlordClient overlordClient = new NoopOverlordClient() {
+ final OverlordClient overlordClient = new NoopOverlordClient()
+ {
@Override
public ListenableFuture findCurrentLeader()
{
@@ -481,7 +496,8 @@ public static DruidSchemaCatalog createMockRootSchema(
conglomerate,
walker,
plannerConfig,
- authorizerMapper);
+ authorizerMapper
+ );
}
/**
diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/util/TestDataBuilder.java b/sql/src/test/java/org/apache/druid/sql/calcite/util/TestDataBuilder.java
index 383c740811e1..8093e36dfc38 100644
--- a/sql/src/test/java/org/apache/druid/sql/calcite/util/TestDataBuilder.java
+++ b/sql/src/test/java/org/apache/druid/sql/calcite/util/TestDataBuilder.java
@@ -878,6 +878,15 @@ public static SpecificSegmentsQuerySegmentWalker createMockWalker(
.build(),
index2
).add(
+ DataSegment.builder()
+ .dataSource(CalciteTests.RESTRICTED_DATASOURCE)
+ .interval(index1.getDataInterval())
+ .version("1")
+ .shardSpec(new LinearShardSpec(0))
+ .size(0)
+ .build(),
+ index1
+ ).add(
DataSegment.builder()
.dataSource(CalciteTests.FORBIDDEN_DATASOURCE)
.interval(forbiddenIndex.getDataInterval())
diff --git a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java
index b44d0dfea584..8168fb53683d 100644
--- a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java
+++ b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java
@@ -76,9 +76,9 @@
import org.apache.druid.server.mocks.MockHttpServletResponse;
import org.apache.druid.server.scheduling.HiLoQueryLaningStrategy;
import org.apache.druid.server.scheduling.ManualQueryPrioritizationStrategy;
-import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.AuthConfig;
import org.apache.druid.server.security.AuthenticationResult;
+import org.apache.druid.server.security.AuthorizationResult;
import org.apache.druid.server.security.ForbiddenException;
import org.apache.druid.server.security.ResourceAction;
import org.apache.druid.sql.DirectStatement;
@@ -350,20 +350,39 @@ public void tearDown() throws Exception
@Test
public void testUnauthorized()
{
- try {
- postForAsyncResponse(
- createSimpleQueryWithId("id", "select count(*) from forbiddenDatasource"),
- request()
- );
- Assert.fail("doPost did not throw ForbiddenException for an unauthorized query");
- }
- catch (ForbiddenException e) {
- // expected
- }
+ ForbiddenException e = Assert.assertThrows(ForbiddenException.class, () -> {
+ postForAsyncResponse(createSimpleQueryWithId("id", "select count(*) from forbiddenDatasource"), request());
+ });
+ Assert.assertEquals("Unauthorized", e.getMessage());
Assert.assertEquals(1, testRequestLogger.getSqlQueryLogs().size());
Assert.assertTrue(lifecycleManager.getAll("id").isEmpty());
}
+ @Test
+ public void testRestricted() throws Exception
+ {
+ req = makeSuperUserReq();
+ final List> resultAsSuperUser = doPost(createSimpleQueryWithId(
+ "id",
+ "select count(*) as cnt from restrictedDatasource_m1_is_6"
+ )).rhs;
+ Assert.assertEquals(ImmutableList.of(ImmutableMap.of("cnt", 6)), resultAsSuperUser);
+
+ checkSqlRequestLog(true, CalciteTests.TEST_SUPERUSER_NAME);
+ testRequestLogger.clear();
+ Assert.assertTrue(lifecycleManager.getAll("id").isEmpty());
+
+ req = makeRegularUserReq();
+ final List> resultAsRegularUser = doPost(createSimpleQueryWithId(
+ "id",
+ "select count(*) as cnt from restrictedDatasource_m1_is_6"
+ )).rhs;
+ Assert.assertEquals(ImmutableList.of(ImmutableMap.of("cnt", 1)), resultAsRegularUser);
+ checkSqlRequestLog(true);
+ testRequestLogger.clear();
+ Assert.assertTrue(lifecycleManager.getAll("id").isEmpty());
+ }
+
@Test
public void testCountStar() throws Exception
{
@@ -1555,7 +1574,7 @@ public ErrorResponseTransformStrategy getErrorResponseTransformStrategy()
/**
* See class-level javadoc for {@link org.apache.druid.sql.calcite.util.testoperator.AssertionErrorOperatorConversion}
* for rationale as to why this test exists.
- *
+ *