Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add read_failures privilege for authorizing failure store #119915

Draft
wants to merge 2 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,7 @@ A successful call returns an object with "cluster", "index", and "remote_cluster
"read",
"read_cross_cluster",
"view_index_metadata",
"read_failures",
"write"
],
"remote_cluster" : [
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1719,7 +1719,7 @@ private static Set<ResolvedExpression> expandToOpenClosed(
for (int i = 0, n = indexAbstraction.getIndices().size(); i < n; i++) {
Index index = indexAbstraction.getIndices().get(i);
IndexMetadata indexMetadata = context.state.metadata().index(index);
if (indexMetadata.getState() != excludeState) {
if (indexMetadata != null && indexMetadata.getState() != excludeState) {
resources.add(
new ResolvedExpression(
index.getName(),
Expand Down

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,9 @@ public final class IndexPrivilege extends Privilege {
ResolveIndexAction.NAME,
TransportResolveClusterAction.NAME
);
// This is a special case: read_failures acts like `read` *only* for failure store indices in authorized data streams.
// This internal action is not used, but having it makes automaton subset checks work as expected with this privilege.
private static final Automaton READ_FAILURES_AUTOMATON = patterns("internal:special/read_failures");
private static final Automaton READ_CROSS_CLUSTER_AUTOMATON = patterns(
"internal:transport/proxy/indices:data/read/*",
TransportClusterSearchShardsAction.TYPE.name(),
Expand Down Expand Up @@ -178,7 +181,11 @@ public final class IndexPrivilege extends Privilege {

public static final IndexPrivilege NONE = new IndexPrivilege("none", Automatons.EMPTY);
public static final IndexPrivilege ALL = new IndexPrivilege("all", ALL_AUTOMATON);
public static final IndexPrivilege READ = new IndexPrivilege("read", READ_AUTOMATON);
public static final String READ_PRIVILEGE_NAME = "read";
public static final IndexPrivilege READ = new IndexPrivilege(READ_PRIVILEGE_NAME, READ_AUTOMATON);
public static final String READ_FAILURES_PRIVILEGE_NAME = "read_failures";
// read_failures is a special case - it should act like `read`, but adjusted to only allow access to failure indices
public static final IndexPrivilege READ_FAILURES = new IndexPrivilege(READ_FAILURES_PRIVILEGE_NAME, READ_FAILURES_AUTOMATON);
public static final IndexPrivilege READ_CROSS_CLUSTER = new IndexPrivilege("read_cross_cluster", READ_CROSS_CLUSTER_AUTOMATON);
public static final IndexPrivilege CREATE = new IndexPrivilege("create", CREATE_AUTOMATON);
public static final IndexPrivilege INDEX = new IndexPrivilege("index", INDEX_AUTOMATON);
Expand Down Expand Up @@ -221,6 +228,7 @@ public final class IndexPrivilege extends Privilege {
entry("create_index", CREATE_INDEX),
entry("monitor", MONITOR),
entry("read", READ),
entry("read_failures", READ_FAILURES),
entry("index", INDEX),
entry("delete", DELETE),
entry("write", WRITE),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ public void testOrderingOfPrivilegeNames() throws Exception {
}

public void testFindPrivilegesThatGrant() {
assertThat(findPrivilegesThatGrant(TransportSearchAction.TYPE.name()), equalTo(List.of("read", "all")));
assertThat(findPrivilegesThatGrant(TransportSearchAction.TYPE.name()), equalTo(List.of("read", "read_failures", "all")));
assertThat(findPrivilegesThatGrant(TransportIndexAction.NAME), equalTo(List.of("create_doc", "create", "index", "write", "all")));
assertThat(findPrivilegesThatGrant(TransportUpdateAction.NAME), equalTo(List.of("index", "write", "all")));
assertThat(findPrivilegesThatGrant(TransportDeleteAction.NAME), equalTo(List.of("delete", "write", "all")));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -676,7 +676,7 @@ public void testCrossClusterQueryWithOnlyRemotePrivs() throws Exception {
error.getMessage(),
containsString(
"action [indices:data/read/esql] is unauthorized for user [remote_search_user] with effective roles [remote_search], "
+ "this action is granted by the index privileges [read,read_cross_cluster,all]"
+ "this action is granted by the index privileges [read,read_failures,read_cross_cluster,all]"
)
);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -435,7 +435,7 @@ public void testUpdateCrossClusterApiKey() throws Exception {
+ "for user [foo] with assigned roles [role] authenticated by API key id ["
+ apiKeyId
+ "] of user [test_user] on indices [index], this action is granted by the index privileges "
+ "[view_index_metadata,manage,read,all]"
+ "[view_index_metadata,manage,read,read_failures,all]"
)
);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,7 @@ public void testWorkflowsRestrictionAllowsAccess() throws IOException {
+ apiKeyId
+ "] of user ["
+ WORKFLOW_API_KEY_USER
+ "] on indices [my-app-b], this action is granted by the index privileges [read,all]"
+ "] on indices [my-app-b], this action is granted by the index privileges [read,read_failures,all]"
)
);
assertThat(e.getMessage(), not(containsString("access restricted by workflow")));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,27 +8,42 @@
package org.elasticsearch.integration;

import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.datastreams.CreateDataStreamAction;
import org.elasticsearch.action.datastreams.ModifyDataStreamsAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.DataStreamAction;
import org.elasticsearch.cluster.metadata.DataStreamFailureStore;
import org.elasticsearch.cluster.metadata.DataStreamOptions;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.metadata.ResettableValue;
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.core.Strings;
import org.elasticsearch.datastreams.DataStreamsPlugin;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.SecurityIntegTestCase;
import org.elasticsearch.test.SecuritySettingsSource;
import org.elasticsearch.test.SecuritySettingsSourceField;
import org.elasticsearch.transport.netty4.Netty4Plugin;
import org.elasticsearch.xcontent.XContentType;
import org.elasticsearch.xpack.security.LocalStateSecurity;
import org.elasticsearch.xpack.wildcard.Wildcard;

Expand All @@ -43,6 +58,7 @@
import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.nullValue;

public class DataStreamSecurityIT extends SecurityIntegTestCase {

Expand All @@ -51,6 +67,31 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
return List.of(LocalStateSecurity.class, Netty4Plugin.class, MapperExtrasPlugin.class, DataStreamsPlugin.class, Wildcard.class);
}

@Override
protected String configUsers() {
    // Append the `only_failures` test user (standard test password) to the default user config.
    final String hashedPassword = new String(
        getFastStoredHashAlgoForTests().hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)
    );
    return super.configUsers() + "only_failures:" + hashedPassword + "\n";
}

@Override
protected String configUsersRoles() {
    // Map the `only_failures` user onto the `only_failures` role defined in configRoles().
    return super.configUsersRoles() + "only_failures:only_failures\n";
}

@Override
protected String configRoles() {
    // Role granting only `read_failures` (failure-store read access) and `write` on all indices.
    // It deliberately omits the plain `read` privilege so tests can verify that failure-store
    // access does not leak into regular (backing-index) data access.
    return Strings.format("""
        %s
        only_failures:
          indices:
            - names: '*'
              privileges: [ 'read_failures', 'write' ]
        """, super.configRoles());
}

public void testRemoveGhostReference() throws Exception {
var headers = Map.of(
BASIC_AUTH_HEADER,
Expand Down Expand Up @@ -142,4 +183,62 @@ public void onFailure(Exception e) {
assertThat(indicesStatsResponse.getIndices().size(), equalTo(shouldBreakIndexName ? 1 : 2));
}

// NOTE(review): method name has a typo ("Authorziation" -> "Authorization"); kept as-is to avoid
// churning any references to the test name, but it should be renamed in a follow-up.
public void testFailureStoreAuthorziation() throws Exception {
    // Admin client: full test-user privileges; used only for setup (template, data stream, indexing).
    var adminHeaders = Map.of(
        BASIC_AUTH_HEADER,
        basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)
    );
    final var adminClient = client().filterWithHeader(adminHeaders);
    // Restricted client: authenticates as the `only_failures` user (configured in configUsers/configRoles
    // with only `read_failures` and `write`). The original code mistakenly authenticated as the admin
    // test user here, which meant the assertions below never exercised the read_failures privilege.
    var onlyFailHeaders = Map.of(
        BASIC_AUTH_HEADER,
        basicAuthHeaderValue("only_failures", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)
    );
    final var failuresClient = client().filterWithHeader(onlyFailHeaders);

    // Template with the failure store enabled, and a strict integer mapping for `code` so that a
    // document with a non-numeric `code` is redirected into the failure store.
    var putTemplateRequest = new TransportPutComposableIndexTemplateAction.Request("id");
    putTemplateRequest.indexTemplate(
        ComposableIndexTemplate.builder()
            .indexPatterns(List.of("stuff-*"))
            .template(
                Template.builder()
                    .mappings(CompressedXContent.fromJSON("{\"properties\": {\"code\": {\"type\": \"integer\"}}}"))
                    .dataStreamOptions(
                        new DataStreamOptions.Template(
                            ResettableValue.create(new DataStreamFailureStore.Template(ResettableValue.create(true)))
                        )
                    )
            )
            .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false))
            .build()
    );
    assertAcked(adminClient.execute(TransportPutComposableIndexTemplateAction.TYPE, putTemplateRequest).actionGet());

    String dataStreamName = "stuff-es";
    var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataStreamName);
    assertAcked(adminClient.execute(CreateDataStreamAction.INSTANCE, request).actionGet());

    // One malformed document (string `code`, no @timestamp) that should land in the failure store,
    // and one well-formed document that should land in the backing index.
    BulkRequest bulkRequest = new BulkRequest();
    bulkRequest.add(
        new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.CREATE)
            .source("{\"code\": \"well this aint right\"}", XContentType.JSON),
        new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.CREATE)
            .source("{\"@timestamp\": \"2015-01-01T12:10:30Z\", \"code\": 404}", XContentType.JSON)
    );
    BulkResponse bulkResponse = adminClient.bulk(bulkRequest).actionGet();
    assertThat(bulkResponse.getItems().length, equalTo(2));
    String backingIndexPrefix = DataStream.BACKING_INDEX_PREFIX + dataStreamName;
    String failureIndexPrefix = DataStream.FAILURE_STORE_PREFIX + dataStreamName;

    for (BulkItemResponse itemResponse : bulkResponse) {
        assertThat(itemResponse.getFailure(), nullValue());
        assertThat(itemResponse.status(), equalTo(RestStatus.CREATED));
        // Every item must have been routed to either the backing index or the failure store.
        String targetIndex = itemResponse.getIndex();
        assertTrue(
            "expected [" + targetIndex + "] to start with [" + backingIndexPrefix + "] or [" + failureIndexPrefix + "]",
            targetIndex.startsWith(backingIndexPrefix) || targetIndex.startsWith(failureIndexPrefix)
        );
    }

    indicesAdmin().refresh(new RefreshRequest(dataStreamName)).actionGet();
    // Resolving both components must be authorized for the restricted user. The original code never
    // awaited this future, so an authorization failure here would have gone unnoticed.
    failuresClient.admin().indices().getIndex(new GetIndexRequest().indices(dataStreamName + "::*")).actionGet();
    // The restricted user may search the failure store, which holds exactly the one malformed document.
    var searchResponse = failuresClient.prepareSearch(dataStreamName + "::failures").get();
    assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(1L));
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -874,23 +874,33 @@ static AuthorizedIndices resolveAuthorizedIndicesFromRole(
// TODO: can this be done smarter? I think there are usually more indices/aliases in the cluster then indices defined a roles?
if (includeDataStreams) {
for (IndexAbstraction indexAbstraction : lookup.values()) {
if (predicate.test(indexAbstraction)) {
IndicesPermission.AuthorizedComponents authResult = predicate.test(indexAbstraction);
if (authResult != null && authResult != IndicesPermission.AuthorizedComponents.NONE) {
indicesAndAliases.add(indexAbstraction.getName());
if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) {
// add data stream and its backing indices for any authorized data streams
for (Index index : indexAbstraction.getIndices()) {
indicesAndAliases.add(index.getName());
if (authResult == IndicesPermission.AuthorizedComponents.ALL
|| authResult == IndicesPermission.AuthorizedComponents.DATA) {
for (Index index : indexAbstraction.getIndices()) {
indicesAndAliases.add(index.getName());
}
}
// TODO: We need to limit if a data stream's failure indices should return here.
for (Index index : ((DataStream) indexAbstraction).getFailureIndices().getIndices()) {
indicesAndAliases.add(index.getName());

if (authResult == IndicesPermission.AuthorizedComponents.ALL
|| authResult == IndicesPermission.AuthorizedComponents.FAILURES) {
for (Index index : ((DataStream) indexAbstraction).getFailureIndices().getIndices()) {
indicesAndAliases.add(index.getName());
}
}

}
}
}
} else {
for (IndexAbstraction indexAbstraction : lookup.values()) {
if (indexAbstraction.getType() != IndexAbstraction.Type.DATA_STREAM && predicate.test(indexAbstraction)) {
IndicesPermission.AuthorizedComponents authResult = predicate.test(indexAbstraction);
if (indexAbstraction.getType() != IndexAbstraction.Type.DATA_STREAM
&& authResult != null
&& authResult != IndicesPermission.AuthorizedComponents.NONE) {
indicesAndAliases.add(indexAbstraction.getName());
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -987,7 +987,10 @@ public void testUnknownRoleCausesDenial() {
)
)
);
assertThat(securityException, throwableWithMessage(containsString("this action is granted by the index privileges [read,all]")));
assertThat(
securityException,
throwableWithMessage(containsString("this action is granted by the index privileges [read,read_failures,all]"))
);

verify(auditTrail).accessDenied(eq(requestId), eq(authentication), eq(action), eq(request), authzInfoRoles(Role.EMPTY.names()));
verifyNoMoreInteractions(auditTrail);
Expand Down Expand Up @@ -1033,7 +1036,10 @@ public void testServiceAccountDenial() {
throwableWithMessage(containsString("[" + action + "] is unauthorized for service account [" + serviceUser.principal() + "]"))
);
verify(auditTrail).accessDenied(eq(requestId), eq(authentication), eq(action), eq(request), authzInfoRoles(role.names()));
assertThat(securityException, throwableWithMessage(containsString("this action is granted by the index privileges [read,all]")));
assertThat(
securityException,
throwableWithMessage(containsString("this action is granted by the index privileges [read,read_failures,all]"))
);
verifyNoMoreInteractions(auditTrail);
}

Expand Down Expand Up @@ -1083,7 +1089,10 @@ public void testThatRoleWithNoIndicesIsDenied() {
containsString("[" + action + "] is unauthorized" + " for user [test user]" + " with effective roles [no_indices]")
)
);
assertThat(securityException, throwableWithMessage(containsString("this action is granted by the index privileges [read,all]")));
assertThat(
securityException,
throwableWithMessage(containsString("this action is granted by the index privileges [read,read_failures,all]"))
);

verify(auditTrail).accessDenied(
eq(requestId),
Expand Down Expand Up @@ -1536,7 +1545,10 @@ public void testDenialErrorMessagesForSearchAction() {
assertThat(securityException, throwableWithMessage(containsString("other-4")));
assertThat(securityException, throwableWithMessage(not(containsString("all-1"))));
assertThat(securityException, throwableWithMessage(not(containsString("read-2"))));
assertThat(securityException, throwableWithMessage(containsString(", this action is granted by the index privileges [read,all]")));
assertThat(
securityException,
throwableWithMessage(containsString(", this action is granted by the index privileges [read,read_failures,all]"))
);
}

public void testDenialErrorMessagesForBulkIngest() throws Exception {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,4 @@ setup:
# I would much prefer we could just check that specific entries are in the array, but we don't have
# an assertion for that
- length: { "cluster" : 62 }
- length: { "index" : 22 }
- length: { "index" : 23 }