diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index 60ae4d58f343e..90a4f74b5e9f4 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -10,6 +10,8 @@ import org.elasticsearch.gradle.util.Pair import org.elasticsearch.gradle.util.GradleUtils import org.elasticsearch.gradle.internal.test.TestUtil +import org.elasticsearch.gradle.internal.idea.EnablePreviewFeaturesTask +import org.elasticsearch.gradle.internal.idea.IdeaXmlUtil import org.jetbrains.gradle.ext.JUnit import java.nio.file.Files @@ -144,19 +146,10 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { } // modifies the idea module config to enable preview features on ':libs:native' module - tasks.register("enablePreviewFeatures") { + tasks.register("enablePreviewFeatures", EnablePreviewFeaturesTask) { group = 'ide' description = 'Enables preview features on native library module' dependsOn tasks.named("enableExternalConfiguration") - -// ext { - def enablePreview = { moduleFile, languageLevel -> - IdeaXmlUtil.modifyXml(moduleFile) { xml -> - xml.component.find { it.'@name' == 'NewModuleRootManager' }?.'@LANGUAGE_LEVEL' = languageLevel - } - } -// } - doLast { enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.main.iml', 'JDK_21_PREVIEW') enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.test.iml', 'JDK_21_PREVIEW') @@ -277,46 +270,6 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { } } -/** - * Parses a given XML file, applies a set of changes, and writes those changes back to the original file. - * - * @param path Path to existing XML file - * @param action Action to perform on parsed XML document - * @param preface optional front matter to add after the XML declaration - * but before the XML document, e.g. a doctype or comment - */ - -class IdeaXmlUtil { - static Node parseXml(Object xmlPath) { - File xmlFile = new File(xmlPath) - XmlParser xmlParser = new XmlParser(false, true, true) - xmlParser.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false) - Node xml = xmlParser.parse(xmlFile) - return xml - } - - static void modifyXml(Object xmlPath, Action action, String preface = null) { - File xmlFile = new File(xmlPath) - if (xmlFile.exists()) { - Node xml = parseXml(xmlPath) - action.execute(xml) - - xmlFile.withPrintWriter { writer -> - def printer = new XmlNodePrinter(writer) - printer.namespaceAware = true - printer.preserveWhitespace = true - writer.write("\n") - - if (preface != null) { - writer.write(preface) - } - printer.print(xml) - } - } - } -} - - Pair locateElasticsearchWorkspace(Gradle gradle) { if (gradle.parent == null) { // See if any of these included builds is the Elasticsearch gradle diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/EnablePreviewFeaturesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/EnablePreviewFeaturesTask.java new file mode 100644 index 0000000000000..f8c8b5127827f --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/EnablePreviewFeaturesTask.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.gradle.internal.idea; + +import groovy.util.Node; +import groovy.util.NodeList; + +import org.gradle.api.DefaultTask; +import org.xml.sax.SAXException; + +import java.io.IOException; + +import javax.xml.parsers.ParserConfigurationException; + +public class EnablePreviewFeaturesTask extends DefaultTask { + + public void enablePreview(String moduleFile, String languageLevel) throws IOException, ParserConfigurationException, SAXException { + IdeaXmlUtil.modifyXml(moduleFile, xml -> { + // Find the 'component' node + NodeList nodes = (NodeList) xml.depthFirst(); + Node componentNode = null; + for (Object node : nodes) { + Node currentNode = (Node) node; + if ("component".equals(currentNode.name()) && "NewModuleRootManager".equals(currentNode.attribute("name"))) { + componentNode = currentNode; + break; + } + } + + // Add the attribute to the 'component' node + if (componentNode != null) { + componentNode.attributes().put("LANGUAGE_LEVEL", languageLevel); + } + }); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/IdeaXmlUtil.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/IdeaXmlUtil.java new file mode 100644 index 0000000000000..b7cc2862a0af1 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/IdeaXmlUtil.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.gradle.internal.idea; + +import groovy.util.Node; +import groovy.util.XmlParser; +import groovy.xml.XmlNodePrinter; + +import org.gradle.api.Action; +import org.xml.sax.SAXException; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; + +import javax.xml.parsers.ParserConfigurationException; + +public class IdeaXmlUtil { + + static Node parseXml(String xmlPath) throws IOException, SAXException, ParserConfigurationException { + File xmlFile = new File(xmlPath); + XmlParser xmlParser = new XmlParser(false, true, true); + xmlParser.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false); + Node xml = xmlParser.parse(xmlFile); + return xml; + } + + /** + * Parses a given XML file, applies a set of changes, and writes those changes back to the original file. + * + * @param path Path to existing XML file + * @param action Action to perform on parsed XML document + * but before the XML document, e.g. a doctype or comment + */ + static void modifyXml(String xmlPath, Action action) throws IOException, ParserConfigurationException, SAXException { + modifyXml(xmlPath, action, null); + } + + /** + * Parses a given XML file, applies a set of changes, and writes those changes back to the original file. 
+ * + * @param path Path to existing XML file + * @param action Action to perform on parsed XML document + * @param preface optional front matter to add after the XML declaration + * but before the XML document, e.g. a doctype or comment + */ + static void modifyXml(String xmlPath, Action action, String preface) throws IOException, ParserConfigurationException, + SAXException { + File xmlFile = new File(xmlPath); + if (xmlFile.exists()) { + Node xml = parseXml(xmlPath); + action.execute(xml); + + try (PrintWriter writer = new PrintWriter(xmlFile)) { + var printer = new XmlNodePrinter(writer); + printer.setNamespaceAware(true); + printer.setPreserveWhitespace(true); + writer.write("\n"); + if (preface != null) { + writer.write(preface); + } + printer.print(xml); + } + } + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java index 265bd52eabe83..916823fd91b61 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java @@ -89,7 +89,6 @@ public static void stopHttpServers() throws IOException { } public void testBuilderUsesDefaultSSLContext() throws Exception { - assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); final SSLContext defaultSSLContext = SSLContext.getDefault(); try { try (RestClient client = buildRestClient()) { @@ -97,10 +96,15 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { client.performRequest(new Request("GET", "/")); fail("connection should have been rejected due to SSL handshake"); } catch (Exception e) { - assertThat(e, instanceOf(SSLHandshakeException.class)); + if (inFipsJvm()) { + // Bouncy Castle throw a different exception + assertThat(e, instanceOf(IOException.class)); + assertThat(e.getCause(), instanceOf(javax.net.ssl.SSLException.class)); + } else { + assertThat(e, instanceOf(SSLHandshakeException.class)); + } } } - SSLContext.setDefault(getSslContext()); try (RestClient client = buildRestClient()) { Response response = client.performRequest(new Request("GET", "/")); @@ -112,7 +116,6 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { } public void testBuilderSetsThreadName() throws Exception { - assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); final SSLContext defaultSSLContext = SSLContext.getDefault(); try { SSLContext.setDefault(getSslContext()); diff --git a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/cli/keystore/KeyStoreWrapperTests.java b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/cli/keystore/KeyStoreWrapperTests.java index 38bb7d592f7c0..5ab27bac3998a 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/cli/keystore/KeyStoreWrapperTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/cli/keystore/KeyStoreWrapperTests.java @@ -58,6 +58,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; public class KeyStoreWrapperTests extends ESTestCase { @@ -436,17 +437,8 @@ public void testStringAndFileDistinction() throws Exception { public void testLegacyV3() throws GeneralSecurityException, IOException { assumeFalse("Cannot open 
unprotected keystore on FIPS JVM", inFipsJvm()); final Path configDir = createTempDir(); - final Path keystore = configDir.resolve("elasticsearch.keystore"); - try ( - InputStream is = KeyStoreWrapperTests.class.getResourceAsStream("/format-v3-elasticsearch.keystore"); - OutputStream os = Files.newOutputStream(keystore) - ) { - final byte[] buffer = new byte[4096]; - int readBytes; - while ((readBytes = is.read(buffer)) > 0) { - os.write(buffer, 0, readBytes); - } - } + copyKeyStoreFromResourceToConfigDir(configDir, "/format-v3-elasticsearch.keystore"); + final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir); assertNotNull(wrapper); wrapper.decrypt(new char[0]); @@ -460,9 +452,31 @@ public void testLegacyV3() throws GeneralSecurityException, IOException { public void testLegacyV5() throws GeneralSecurityException, IOException { final Path configDir = createTempDir(); + copyKeyStoreFromResourceToConfigDir(configDir, "/format-v5-with-password-elasticsearch.keystore"); + + final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir); + assertNotNull(wrapper); + wrapper.decrypt("keystorepassword".toCharArray()); + assertThat(wrapper.getFormatVersion(), equalTo(5)); + assertThat(wrapper.getSettingNames(), equalTo(Set.of("keystore.seed"))); + } + + public void testLegacyV6() throws GeneralSecurityException, IOException { + final Path configDir = createTempDir(); + copyKeyStoreFromResourceToConfigDir(configDir, "/format-v6-elasticsearch.keystore"); + + final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir); + assertNotNull(wrapper); + wrapper.decrypt("keystorepassword".toCharArray()); + assertThat(wrapper.getFormatVersion(), equalTo(6)); + assertThat(wrapper.getSettingNames(), equalTo(Set.of("keystore.seed", "string"))); + assertThat(wrapper.getString("string"), equalTo("value")); + } + + private void copyKeyStoreFromResourceToConfigDir(Path configDir, String name) throws IOException { final Path keystore = configDir.resolve("elasticsearch.keystore"); try ( - InputStream is = KeyStoreWrapperTests.class.getResourceAsStream("/format-v5-with-password-elasticsearch.keystore"); + InputStream is = KeyStoreWrapperTests.class.getResourceAsStream(name); // OutputStream os = Files.newOutputStream(keystore) ) { final byte[] buffer = new byte[4096]; @@ -471,11 +485,6 @@ public void testLegacyV5() throws GeneralSecurityException, IOException { os.write(buffer, 0, readBytes); } } - final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir); - assertNotNull(wrapper); - wrapper.decrypt("keystorepassword".toCharArray()); - assertThat(wrapper.getFormatVersion(), equalTo(5)); - assertThat(wrapper.getSettingNames(), equalTo(Set.of("keystore.seed"))); } public void testSerializationNewlyCreated() throws Exception { @@ -487,6 +496,7 @@ public void testSerializationNewlyCreated() throws Exception { wrapper.writeTo(out); final KeyStoreWrapper fromStream = new KeyStoreWrapper(out.bytes().streamInput()); + assertThat(fromStream.getFormatVersion(), is(KeyStoreWrapper.CURRENT_VERSION)); assertThat(fromStream.getSettingNames(), hasSize(2)); assertThat(fromStream.getSettingNames(), containsInAnyOrder("string_setting", "keystore.seed")); diff --git a/distribution/tools/keystore-cli/src/test/resources/format-v6-elasticsearch.keystore b/distribution/tools/keystore-cli/src/test/resources/format-v6-elasticsearch.keystore new file mode 100644 index 0000000000000..0f680cc013563 Binary files /dev/null and b/distribution/tools/keystore-cli/src/test/resources/format-v6-elasticsearch.keystore differ diff --git 
a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index fe0f82560894c..8b3977fe66428 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -71,7 +71,7 @@ static List systemJvmOptions(Settings nodeSettings, final Map s).toList(); } @@ -140,7 +140,7 @@ private static Stream maybeWorkaroundG1Bug() { } @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - private static Stream maybeAllowSecurityManager() { + private static Stream maybeAllowSecurityManager(boolean useEntitlements) { if (RuntimeVersionFeature.isSecurityManagerAvailable()) { // Will become conditional on useEntitlements once entitlements can run without SM return Stream.of("-Djava.security.manager=allow"); @@ -167,12 +167,16 @@ private static Stream maybeAttachEntitlementAgent(boolean useEntitlement } catch (IOException e) { throw new IllegalStateException("Failed to list entitlement jars in: " + dir, e); } + // We instrument classes in these modules to call the bridge. Because the bridge gets patched + // into java.base, we must export the bridge from java.base to these modules. + String modulesContainingEntitlementInstrumentation = "java.logging"; return Stream.of( "-Des.entitlements.enabled=true", "-XX:+EnableDynamicAgentLoading", "-Djdk.attach.allowAttachSelf=true", "--patch-module=java.base=" + bridgeJar, - "--add-exports=java.base/org.elasticsearch.entitlement.bridge=org.elasticsearch.entitlement" + "--add-exports=java.base/org.elasticsearch.entitlement.bridge=org.elasticsearch.entitlement," + + modulesContainingEntitlementInstrumentation ); } } diff --git a/docs/changelog/118324.yaml b/docs/changelog/118324.yaml new file mode 100644 index 0000000000000..729ff56f6a253 --- /dev/null +++ b/docs/changelog/118324.yaml @@ -0,0 +1,6 @@ +pr: 118324 +summary: Allow the data type of `null` in filters +area: ES|QL +type: bug +issues: + - 116351 diff --git a/docs/changelog/118652.yaml b/docs/changelog/118652.yaml new file mode 100644 index 0000000000000..0b08686230405 --- /dev/null +++ b/docs/changelog/118652.yaml @@ -0,0 +1,5 @@ +pr: 118652 +summary: Add Jina AI API to do inference for Embedding and Rerank models +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/118669.yaml b/docs/changelog/118669.yaml new file mode 100644 index 0000000000000..4e0d10aaac816 --- /dev/null +++ b/docs/changelog/118669.yaml @@ -0,0 +1,5 @@ +pr: 118669 +summary: "[Connector API] Support soft-deletes of connectors" +area: Extract&Transform +type: feature +issues: [] diff --git a/docs/changelog/118871.yaml b/docs/changelog/118871.yaml new file mode 100644 index 0000000000000..3c1a06d450f39 --- /dev/null +++ b/docs/changelog/118871.yaml @@ -0,0 +1,5 @@ +pr: 118871 +summary: "[Elastic Inference Service] Add ElasticInferenceService Unified ChatCompletions Integration" +area: Inference +type: enhancement +issues: [] diff --git a/docs/changelog/118919.yaml b/docs/changelog/118919.yaml new file mode 100644 index 0000000000000..832fd86fe08ba --- /dev/null +++ b/docs/changelog/118919.yaml @@ -0,0 +1,5 @@ +pr: 118919 +summary: Remove unsupported timeout from rest-api-spec license API +area: License +type: bug +issues: [] diff --git a/docs/changelog/118921.yaml b/docs/changelog/118921.yaml new file mode 100644 index 
0000000000000..bd341616d8a14 --- /dev/null +++ b/docs/changelog/118921.yaml @@ -0,0 +1,5 @@ +pr: 118921 +summary: Add missing timeouts to rest-api-spec shutdown APIs +area: Infra/Node Lifecycle +type: bug +issues: [] diff --git a/docs/changelog/118938.yaml b/docs/changelog/118938.yaml new file mode 100644 index 0000000000000..395da7912fd4b --- /dev/null +++ b/docs/changelog/118938.yaml @@ -0,0 +1,5 @@ +pr: 118938 +summary: Hash functions +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/118954.yaml b/docs/changelog/118954.yaml new file mode 100644 index 0000000000000..ab2f2cda5c11e --- /dev/null +++ b/docs/changelog/118954.yaml @@ -0,0 +1,5 @@ +pr: 118954 +summary: Add missing parameter to `xpack.info` rest-api-spec +area: Infra/REST API +type: bug +issues: [] diff --git a/docs/changelog/119067.yaml b/docs/changelog/119067.yaml new file mode 100644 index 0000000000000..c7ddd570bea18 --- /dev/null +++ b/docs/changelog/119067.yaml @@ -0,0 +1,5 @@ +pr: 119067 +summary: Metrics for indexing failures due to version conflicts +area: CRUD +type: feature +issues: [] diff --git a/docs/changelog/119250.yaml b/docs/changelog/119250.yaml new file mode 100644 index 0000000000000..9db36957d8050 --- /dev/null +++ b/docs/changelog/119250.yaml @@ -0,0 +1,5 @@ +pr: 119250 +summary: Add rest endpoint for `create_from_source_index` +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/119449.yaml b/docs/changelog/119449.yaml new file mode 100644 index 0000000000000..f02bfa6d16d60 --- /dev/null +++ b/docs/changelog/119449.yaml @@ -0,0 +1,5 @@ +pr: 119449 +summary: Add missing traces ilm policy for OTel traces data streams +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/119504.yaml b/docs/changelog/119504.yaml new file mode 100644 index 0000000000000..f63e422face10 --- /dev/null +++ b/docs/changelog/119504.yaml @@ -0,0 +1,5 @@ +pr: 119504 +summary: Optimized index sorting for OTel logs +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/119542.yaml b/docs/changelog/119542.yaml new file mode 100644 index 0000000000000..aaf26c7dc4b0f --- /dev/null +++ b/docs/changelog/119542.yaml @@ -0,0 +1,5 @@ +pr: 119542 +summary: Wait while index is blocked +area: Transform +type: enhancement +issues: [] diff --git a/docs/changelog/119543.yaml b/docs/changelog/119543.yaml new file mode 100644 index 0000000000000..7027ea2a49672 --- /dev/null +++ b/docs/changelog/119543.yaml @@ -0,0 +1,7 @@ +pr: 119543 +summary: "[Inference API] Fix unique ID message for inference ID matches trained model\ + \ ID" +area: Machine Learning +type: bug +issues: + - 111312 diff --git a/docs/changelog/119691.yaml b/docs/changelog/119691.yaml new file mode 100644 index 0000000000000..186944394908d --- /dev/null +++ b/docs/changelog/119691.yaml @@ -0,0 +1,6 @@ +pr: 119691 +summary: Fix `bbq_hnsw` merge file cleanup on random IO exceptions +area: Vector Search +type: bug +issues: + - 119392 diff --git a/docs/changelog/119748.yaml b/docs/changelog/119748.yaml new file mode 100644 index 0000000000000..8b29fb7c1a647 --- /dev/null +++ b/docs/changelog/119748.yaml @@ -0,0 +1,6 @@ +pr: 119748 +summary: Issue S3 web identity token refresh call with sufficient permissions +area: Snapshot/Restore +type: bug +issues: + - 119747 diff --git a/docs/changelog/119749.yaml b/docs/changelog/119749.yaml new file mode 100644 index 0000000000000..aa2b16ceda5ea --- /dev/null +++ b/docs/changelog/119749.yaml @@ -0,0 +1,5 @@ +pr: 119749 +summary: Strengthen encryption for 
elasticsearch-keystore tool to AES 256 +area: Infra/CLI +type: enhancement +issues: [] diff --git a/docs/changelog/119793.yaml b/docs/changelog/119793.yaml new file mode 100644 index 0000000000000..80330c25c2f30 --- /dev/null +++ b/docs/changelog/119793.yaml @@ -0,0 +1,6 @@ +pr: 119793 +summary: Resolve/cluster should mark remotes as not connected when a security exception + is thrown +area: CCS +type: bug +issues: [] diff --git a/docs/changelog/119797.yaml b/docs/changelog/119797.yaml new file mode 100644 index 0000000000000..992c2078e0caa --- /dev/null +++ b/docs/changelog/119797.yaml @@ -0,0 +1,5 @@ +pr: 119797 +summary: "[Inference API] Fix bug checking for e5 or reranker default IDs" +area: Machine Learning +type: bug +issues: [] diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index c52315423f87e..a5a813e8d37d5 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -239,6 +239,9 @@ Number of indexing operations, such as `1`. `indexing.index_failed`, `iif`, `indexingIndexFailed`:: Number of failed indexing operations, such as `0`. +`indexing.index_failed_due_to_version_conflict`, `iifvc`, `indexingIndexFailedDueToVersionConflict`:: +Number of failed indexing operations due to version conflict, such as `0`. + `merges.current`, `mc`, `mergesCurrent`:: Number of current merge operations, such as `0`. diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc index f73ac6e263cd2..2d3859e74c87e 100644 --- a/docs/reference/cat/shards.asciidoc +++ b/docs/reference/cat/shards.asciidoc @@ -162,6 +162,9 @@ Number of indexing operations, such as `1`. `indexing.index_failed`, `iif`, `indexingIndexFailed`:: Number of failed indexing operations, such as `0`. +`indexing.index_failed_due_to_version_conflict`, `iifvc`, `indexingIndexFailedDueToVersionConflict`:: +Number of failed indexing operations due to version conflict, such as `0`. + `merges.current`, `mc`, `mergesCurrent`:: Number of current merge operations, such as `0`. diff --git a/docs/reference/connector/apis/delete-connector-api.asciidoc b/docs/reference/connector/apis/delete-connector-api.asciidoc index b36a99bc2d8cc..f161a3c3b5933 100644 --- a/docs/reference/connector/apis/delete-connector-api.asciidoc +++ b/docs/reference/connector/apis/delete-connector-api.asciidoc @@ -6,14 +6,14 @@ beta::[] + .New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. -- -Removes a connector and associated sync jobs. -This is a destructive action that is not recoverable. +Soft-deletes a connector and removes associated sync jobs. Note: this action doesn't delete any API key, ingest pipeline or data index associated with the connector. These need to be removed manually. diff --git a/docs/reference/connector/apis/get-connector-api.asciidoc b/docs/reference/connector/apis/get-connector-api.asciidoc index cff13539f80cc..c8cbae668c261 100644 --- a/docs/reference/connector/apis/get-connector-api.asciidoc +++ b/docs/reference/connector/apis/get-connector-api.asciidoc @@ -33,6 +33,9 @@ To get started with Connector APIs, check out <`:: (Required, string) +`include_deleted`:: +(Optional, boolean) A flag indicating whether to also return connectors that have been soft-deleted. Defaults to `false`. 
+ [[get-connector-api-response-codes]] ==== {api-response-codes-title} diff --git a/docs/reference/connector/apis/list-connectors-api.asciidoc b/docs/reference/connector/apis/list-connectors-api.asciidoc index 5cc099a6b67ee..d334e5d92c232 100644 --- a/docs/reference/connector/apis/list-connectors-api.asciidoc +++ b/docs/reference/connector/apis/list-connectors-api.asciidoc @@ -47,6 +47,9 @@ To get started with Connector APIs, check out < + +### MD5 +Computes the MD5 hash of the input. + +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL md5 = md5(message) +| KEEP message, md5; +``` diff --git a/docs/reference/esql/functions/kibana/docs/sha1.md b/docs/reference/esql/functions/kibana/docs/sha1.md new file mode 100644 index 0000000000000..a940aa133f06e --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/sha1.md @@ -0,0 +1,13 @@ + + +### SHA1 +Computes the SHA1 hash of the input. + +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL sha1 = sha1(message) +| KEEP message, sha1; +``` diff --git a/docs/reference/esql/functions/kibana/docs/sha256.md b/docs/reference/esql/functions/kibana/docs/sha256.md new file mode 100644 index 0000000000000..fbe576c7c20d6 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/sha256.md @@ -0,0 +1,13 @@ + + +### SHA256 +Computes the SHA256 hash of the input. + +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL sha256 = sha256(message) +| KEEP message, sha256; +``` diff --git a/docs/reference/esql/functions/layout/hash.asciidoc b/docs/reference/esql/functions/layout/hash.asciidoc index 27c55ada6319b..daf7fbf1170b2 100644 --- a/docs/reference/esql/functions/layout/hash.asciidoc +++ b/docs/reference/esql/functions/layout/hash.asciidoc @@ -12,3 +12,4 @@ image::esql/functions/signature/hash.svg[Embedded,opts=inline] include::../parameters/hash.asciidoc[] include::../description/hash.asciidoc[] include::../types/hash.asciidoc[] +include::../examples/hash.asciidoc[] diff --git a/docs/reference/esql/functions/layout/md5.asciidoc b/docs/reference/esql/functions/layout/md5.asciidoc new file mode 100644 index 0000000000000..82d3031d6bdfd --- /dev/null +++ b/docs/reference/esql/functions/layout/md5.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-md5]] +=== `MD5` + +*Syntax* + +[.text-center] +image::esql/functions/signature/md5.svg[Embedded,opts=inline] + +include::../parameters/md5.asciidoc[] +include::../description/md5.asciidoc[] +include::../types/md5.asciidoc[] +include::../examples/md5.asciidoc[] diff --git a/docs/reference/esql/functions/layout/sha1.asciidoc b/docs/reference/esql/functions/layout/sha1.asciidoc new file mode 100644 index 0000000000000..23e1e0e9ac2ab --- /dev/null +++ b/docs/reference/esql/functions/layout/sha1.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +[discrete] +[[esql-sha1]] +=== `SHA1` + +*Syntax* + +[.text-center] +image::esql/functions/signature/sha1.svg[Embedded,opts=inline] + +include::../parameters/sha1.asciidoc[] +include::../description/sha1.asciidoc[] +include::../types/sha1.asciidoc[] +include::../examples/sha1.asciidoc[] diff --git a/docs/reference/esql/functions/layout/sha256.asciidoc b/docs/reference/esql/functions/layout/sha256.asciidoc new file mode 100644 index 0000000000000..d36a1345271f5 --- /dev/null +++ b/docs/reference/esql/functions/layout/sha256.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-sha256]] +=== `SHA256` + +*Syntax* + +[.text-center] +image::esql/functions/signature/sha256.svg[Embedded,opts=inline] + +include::../parameters/sha256.asciidoc[] +include::../description/sha256.asciidoc[] +include::../types/sha256.asciidoc[] +include::../examples/sha256.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/md5.asciidoc b/docs/reference/esql/functions/parameters/md5.asciidoc new file mode 100644 index 0000000000000..99eba4dc2cb3d --- /dev/null +++ b/docs/reference/esql/functions/parameters/md5.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`input`:: +Input to hash. diff --git a/docs/reference/esql/functions/parameters/sha1.asciidoc b/docs/reference/esql/functions/parameters/sha1.asciidoc new file mode 100644 index 0000000000000..99eba4dc2cb3d --- /dev/null +++ b/docs/reference/esql/functions/parameters/sha1.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`input`:: +Input to hash. diff --git a/docs/reference/esql/functions/parameters/sha256.asciidoc b/docs/reference/esql/functions/parameters/sha256.asciidoc new file mode 100644 index 0000000000000..99eba4dc2cb3d --- /dev/null +++ b/docs/reference/esql/functions/parameters/sha256.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`input`:: +Input to hash. 
diff --git a/docs/reference/esql/functions/signature/md5.svg b/docs/reference/esql/functions/signature/md5.svg new file mode 100644 index 0000000000000..419af764a212e --- /dev/null +++ b/docs/reference/esql/functions/signature/md5.svg @@ -0,0 +1 @@ +MD5(input) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/sha1.svg b/docs/reference/esql/functions/signature/sha1.svg new file mode 100644 index 0000000000000..bab03a7eb88c8 --- /dev/null +++ b/docs/reference/esql/functions/signature/sha1.svg @@ -0,0 +1 @@ +SHA1(input) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/sha256.svg b/docs/reference/esql/functions/signature/sha256.svg new file mode 100644 index 0000000000000..b77126bbefbd8 --- /dev/null +++ b/docs/reference/esql/functions/signature/sha256.svg @@ -0,0 +1 @@ +SHA256(input) \ No newline at end of file diff --git a/docs/reference/esql/functions/string-functions.asciidoc b/docs/reference/esql/functions/string-functions.asciidoc index da9580a55151a..dd10e4c77581e 100644 --- a/docs/reference/esql/functions/string-functions.asciidoc +++ b/docs/reference/esql/functions/string-functions.asciidoc @@ -18,11 +18,14 @@ * <> * <> * <> +* <> * <> * <> * <> * <> * <> +* <> +* <> * <> * <> * <> @@ -43,11 +46,14 @@ include::layout/left.asciidoc[] include::layout/length.asciidoc[] include::layout/locate.asciidoc[] include::layout/ltrim.asciidoc[] +include::layout/md5.asciidoc[] include::layout/repeat.asciidoc[] include::layout/replace.asciidoc[] include::layout/reverse.asciidoc[] include::layout/right.asciidoc[] include::layout/rtrim.asciidoc[] +include::layout/sha1.asciidoc[] +include::layout/sha256.asciidoc[] include::layout/space.asciidoc[] include::layout/split.asciidoc[] include::layout/starts_with.asciidoc[] diff --git a/docs/reference/esql/functions/types/md5.asciidoc b/docs/reference/esql/functions/types/md5.asciidoc new file mode 100644 index 0000000000000..049a553397bbd --- /dev/null +++ b/docs/reference/esql/functions/types/md5.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +input | result +keyword | keyword +text | keyword +|=== diff --git a/docs/reference/esql/functions/types/sha1.asciidoc b/docs/reference/esql/functions/types/sha1.asciidoc new file mode 100644 index 0000000000000..049a553397bbd --- /dev/null +++ b/docs/reference/esql/functions/types/sha1.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +input | result +keyword | keyword +text | keyword +|=== diff --git a/docs/reference/esql/functions/types/sha256.asciidoc b/docs/reference/esql/functions/types/sha256.asciidoc new file mode 100644 index 0000000000000..049a553397bbd --- /dev/null +++ b/docs/reference/esql/functions/types/sha256.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +input | result +keyword | keyword +text | keyword +|=== diff --git a/docs/reference/indices/resolve-cluster.asciidoc b/docs/reference/indices/resolve-cluster.asciidoc index 430c377ff4402..5f19fe7484a91 100644 --- a/docs/reference/indices/resolve-cluster.asciidoc +++ b/docs/reference/indices/resolve-cluster.asciidoc @@ -29,8 +29,19 @@ For each cluster in the index expression, information is returned about: 3. whether there are any indices, aliases or data streams on that cluster that match the index expression 4. whether the search is likely to have errors returned when you do the {ccs} (including any - authorization errors if your user does not have permission to query the index) -5. cluster version information, including the Elasticsearch server version + authorization errors if your user does not have permission to query a remote cluster or + the indices on that cluster) +5. (in some cases) cluster version information, including the Elasticsearch server version + +[TIP] +==== +Whenever a security exception is returned for a remote cluster, that remote +will always be marked as connected=false in the response, since your user does not have +permissions to access that cluster (or perhaps the remote index) you are querying. +Once the proper security permissions are obtained, then you can rely on the `connected` field +in the response to determine whether the remote cluster is available and ready for querying. +==== + //// [source,console] diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index e3c292cc534bf..30a1039f93db0 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -557,4 +557,3 @@ The API returns the following results: // TESTRESPONSE[s/"job_version" : "8.4.0"/"job_version" : $body.job_version/] // TESTRESPONSE[s/1656087283340/$body.$_path/] // TESTRESPONSE[s/"superuser"/"_es_test_root"/] -// TESTRESPONSE[s/"ignore_throttled" : true/"ignore_throttled" : true,"failure_store":"exclude"/] diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index 9b71fe9220385..446d3a409234d 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -350,7 +350,7 @@ include::repository-shared-settings.asciidoc[] will disable retries altogether. Note that if retries are enabled in the Azure client, each of these retries comprises that many client-level retries. -`get_register_retry_delay` +`get_register_retry_delay`:: (<>) Sets the time to wait before trying again if an attempt to read a <> fails. Defaults to `5s`. diff --git a/docs/reference/troubleshooting/common-issues/rejected-requests.asciidoc b/docs/reference/troubleshooting/common-issues/rejected-requests.asciidoc index 34ef388f2e3c9..83f6962a219ef 100644 --- a/docs/reference/troubleshooting/common-issues/rejected-requests.asciidoc +++ b/docs/reference/troubleshooting/common-issues/rejected-requests.asciidoc @@ -69,7 +69,7 @@ These stats are cumulative from node startup. Indexing pressure rejections appear as an `EsRejectedExecutionException`, and indicate that they were rejected due -to `coordinating_and_primary_bytes`, `coordinating`, `primary`, or `replica`. +to `combined_coordinating_and_primary`, `coordinating`, `primary`, or `replica`. 
These errors are often related to <>, <> sizing, or the ingest target's @@ -86,4 +86,4 @@ of diagnosing indexing pressure rejections. If {es} regularly rejects requests and other tasks, your cluster likely has high CPU usage or high JVM memory pressure. For tips, see <> and -<>. \ No newline at end of file +<>. diff --git a/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java b/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java index 0bcbc19047c80..eaf4d0ad98ef5 100644 --- a/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java +++ b/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java @@ -66,7 +66,7 @@ public MethodVisitor visitMethod( private static final Type CLASS_TYPE = Type.getType(Class.class); - static MethodKey parseCheckerMethodSignature(String checkerMethodName, Type[] checkerMethodArgumentTypes) { + static ParsedCheckerMethod parseCheckerMethodName(String checkerMethodName) { boolean targetMethodIsStatic; int classNameEndIndex = checkerMethodName.lastIndexOf("$$"); int methodNameStartIndex; @@ -100,9 +100,14 @@ static MethodKey parseCheckerMethodSignature(String checkerMethodName, Type[] ch if (targetClassName.isBlank()) { throw new IllegalArgumentException(String.format(Locale.ROOT, "Checker method %s has no class name", checkerMethodName)); } + return new ParsedCheckerMethod(targetClassName, targetMethodName, targetMethodIsStatic, targetMethodIsCtor); + } + + static MethodKey parseCheckerMethodSignature(String checkerMethodName, Type[] checkerMethodArgumentTypes) { + ParsedCheckerMethod checkerMethod = parseCheckerMethodName(checkerMethodName); final List targetParameterTypes; - if (targetMethodIsStatic || targetMethodIsCtor) { + if (checkerMethod.targetMethodIsStatic() || checkerMethod.targetMethodIsCtor()) { if (checkerMethodArgumentTypes.length < 1 || CLASS_TYPE.equals(checkerMethodArgumentTypes[0]) == false) { throw new IllegalArgumentException( String.format( @@ -130,7 +135,13 @@ static MethodKey parseCheckerMethodSignature(String checkerMethodName, Type[] ch } targetParameterTypes = Arrays.stream(checkerMethodArgumentTypes).skip(2).map(Type::getInternalName).toList(); } - boolean hasReceiver = (targetMethodIsStatic || targetMethodIsCtor) == false; - return new MethodKey(targetClassName, targetMethodName, targetParameterTypes); + return new MethodKey(checkerMethod.targetClassName(), checkerMethod.targetMethodName(), targetParameterTypes); } + + private record ParsedCheckerMethod( + String targetClassName, + String targetMethodName, + boolean targetMethodIsStatic, + boolean targetMethodIsCtor + ) {} } diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java index 67d006868b48d..8b03aeb178587 100644 --- a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java +++ b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java @@ -9,6 +9,13 @@ package org.elasticsearch.entitlement.bridge; +import java.io.InputStream; +import java.io.PrintStream; +import java.io.PrintWriter; +import java.net.ContentHandlerFactory; +import 
java.net.DatagramSocketImplFactory; +import java.net.FileNameMap; +import java.net.SocketImplFactory; import java.net.URL; import java.net.URLStreamHandlerFactory; import java.util.List; @@ -21,26 +28,44 @@ @SuppressWarnings("unused") // Called from instrumentation code inserted by the Entitlements agent public interface EntitlementChecker { + //////////////////// + // // Exit the JVM process + // + void check$java_lang_Runtime$exit(Class callerClass, Runtime runtime, int status); void check$java_lang_Runtime$halt(Class callerClass, Runtime runtime, int status); + void check$java_lang_System$$exit(Class callerClass, int status); + + //////////////////// + // // ClassLoader ctor + // + void check$java_lang_ClassLoader$(Class callerClass); void check$java_lang_ClassLoader$(Class callerClass, ClassLoader parent); void check$java_lang_ClassLoader$(Class callerClass, String name, ClassLoader parent); + //////////////////// + // // SecureClassLoader ctor + // + void check$java_security_SecureClassLoader$(Class callerClass); void check$java_security_SecureClassLoader$(Class callerClass, ClassLoader parent); void check$java_security_SecureClassLoader$(Class callerClass, String name, ClassLoader parent); + //////////////////// + // // URLClassLoader constructors + // + void check$java_net_URLClassLoader$(Class callerClass, URL[] urls); void check$java_net_URLClassLoader$(Class callerClass, URL[] urls, ClassLoader parent); @@ -51,7 +76,11 @@ public interface EntitlementChecker { void check$java_net_URLClassLoader$(Class callerClass, String name, URL[] urls, ClassLoader parent, URLStreamHandlerFactory factory); + //////////////////// + // // "setFactory" methods + // + void check$javax_net_ssl_HttpsURLConnection$setSSLSocketFactory(Class callerClass, HttpsURLConnection conn, SSLSocketFactory sf); void check$javax_net_ssl_HttpsURLConnection$$setDefaultSSLSocketFactory(Class callerClass, SSLSocketFactory sf); @@ -60,9 +89,82 @@ public interface EntitlementChecker { void check$javax_net_ssl_SSLContext$$setDefault(Class callerClass, SSLContext context); + //////////////////// + // // Process creation + // + void check$java_lang_ProcessBuilder$start(Class callerClass, ProcessBuilder that); void check$java_lang_ProcessBuilder$$startPipeline(Class callerClass, List builders); + //////////////////// + // + // JVM-wide state changes + // + + void check$java_lang_System$$setIn(Class callerClass, InputStream in); + + void check$java_lang_System$$setOut(Class callerClass, PrintStream out); + + void check$java_lang_System$$setErr(Class callerClass, PrintStream err); + + void check$java_lang_Runtime$addShutdownHook(Class callerClass, Runtime runtime, Thread hook); + + void check$java_lang_Runtime$removeShutdownHook(Class callerClass, Runtime runtime, Thread hook); + + void check$jdk_tools_jlink_internal_Jlink$(Class callerClass); + + void check$jdk_tools_jlink_internal_Main$$run(Class callerClass, PrintWriter out, PrintWriter err, String... 
args); + + void check$jdk_vm_ci_services_JVMCIServiceLocator$$getProviders(Class callerClass, Class service); + + void check$jdk_vm_ci_services_Services$$load(Class callerClass, Class service); + + void check$jdk_vm_ci_services_Services$$loadSingle(Class callerClass, Class service, boolean required); + + void check$com_sun_tools_jdi_VirtualMachineManagerImpl$$virtualMachineManager(Class callerClass); + + void check$java_lang_Thread$$setDefaultUncaughtExceptionHandler(Class callerClass, Thread.UncaughtExceptionHandler ueh); + + void check$java_util_spi_LocaleServiceProvider$(Class callerClass); + + void check$java_text_spi_BreakIteratorProvider$(Class callerClass); + + void check$java_text_spi_CollatorProvider$(Class callerClass); + + void check$java_text_spi_DateFormatProvider$(Class callerClass); + + void check$java_text_spi_DateFormatSymbolsProvider$(Class callerClass); + + void check$java_text_spi_DecimalFormatSymbolsProvider$(Class callerClass); + + void check$java_text_spi_NumberFormatProvider$(Class callerClass); + + void check$java_util_spi_CalendarDataProvider$(Class callerClass); + + void check$java_util_spi_CalendarNameProvider$(Class callerClass); + + void check$java_util_spi_CurrencyNameProvider$(Class callerClass); + + void check$java_util_spi_LocaleNameProvider$(Class callerClass); + + void check$java_util_spi_TimeZoneNameProvider$(Class callerClass); + + void check$java_util_logging_LogManager$(Class callerClass); + + void check$java_net_DatagramSocket$$setDatagramSocketImplFactory(Class callerClass, DatagramSocketImplFactory fac); + + void check$java_net_HttpURLConnection$$setFollowRedirects(Class callerClass, boolean set); + + void check$java_net_ServerSocket$$setSocketFactory(Class callerClass, SocketImplFactory fac); + + void check$java_net_Socket$$setSocketImplFactory(Class callerClass, SocketImplFactory fac); + + void check$java_net_URL$$setURLStreamHandlerFactory(Class callerClass, URLStreamHandlerFactory fac); + + void check$java_net_URLConnection$$setFileNameMap(Class callerClass, FileNameMap map); + + void check$java_net_URLConnection$$setContentHandlerFactory(Class callerClass, ContentHandlerFactory fac); + } diff --git a/libs/entitlement/qa/common/src/main/java/module-info.java b/libs/entitlement/qa/common/src/main/java/module-info.java index 2dd37e3174e08..211b7041e97ea 100644 --- a/libs/entitlement/qa/common/src/main/java/module-info.java +++ b/libs/entitlement/qa/common/src/main/java/module-info.java @@ -12,5 +12,8 @@ requires org.elasticsearch.base; requires org.elasticsearch.logging; + // Modules we'll attempt to use in order to exercise entitlements + requires java.logging; + exports org.elasticsearch.entitlement.qa.common; } diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java new file mode 100644 index 0000000000000..6dbb684c71514 --- /dev/null +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java @@ -0,0 +1,334 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.qa.common; + +import java.net.InetAddress; +import java.net.Socket; +import java.security.cert.Certificate; +import java.text.BreakIterator; +import java.text.Collator; +import java.text.DateFormat; +import java.text.DateFormatSymbols; +import java.text.DecimalFormatSymbols; +import java.text.NumberFormat; +import java.text.spi.BreakIteratorProvider; +import java.text.spi.CollatorProvider; +import java.text.spi.DateFormatProvider; +import java.text.spi.DateFormatSymbolsProvider; +import java.text.spi.DecimalFormatSymbolsProvider; +import java.text.spi.NumberFormatProvider; +import java.util.Locale; +import java.util.Map; +import java.util.spi.CalendarDataProvider; +import java.util.spi.CalendarNameProvider; +import java.util.spi.CurrencyNameProvider; +import java.util.spi.LocaleNameProvider; +import java.util.spi.LocaleServiceProvider; +import java.util.spi.TimeZoneNameProvider; + +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLSocketFactory; + +/** + * A collection of concrete subclasses that we can instantiate but that don't actually work. + *
<p>
+ * A bit like Mockito but way more painful. + */ +class DummyImplementations { + + static class DummyLocaleServiceProvider extends LocaleServiceProvider { + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyBreakIteratorProvider extends BreakIteratorProvider { + + @Override + public BreakIterator getWordInstance(Locale locale) { + throw unexpected(); + } + + @Override + public BreakIterator getLineInstance(Locale locale) { + throw unexpected(); + } + + @Override + public BreakIterator getCharacterInstance(Locale locale) { + throw unexpected(); + } + + @Override + public BreakIterator getSentenceInstance(Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyCollatorProvider extends CollatorProvider { + + @Override + public Collator getInstance(Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyDateFormatProvider extends DateFormatProvider { + + @Override + public DateFormat getTimeInstance(int style, Locale locale) { + throw unexpected(); + } + + @Override + public DateFormat getDateInstance(int style, Locale locale) { + throw unexpected(); + } + + @Override + public DateFormat getDateTimeInstance(int dateStyle, int timeStyle, Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyDateFormatSymbolsProvider extends DateFormatSymbolsProvider { + + @Override + public DateFormatSymbols getInstance(Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyDecimalFormatSymbolsProvider extends DecimalFormatSymbolsProvider { + + @Override + public DecimalFormatSymbols getInstance(Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyNumberFormatProvider extends NumberFormatProvider { + + @Override + public NumberFormat getCurrencyInstance(Locale locale) { + throw unexpected(); + } + + @Override + public NumberFormat getIntegerInstance(Locale locale) { + throw unexpected(); + } + + @Override + public NumberFormat getNumberInstance(Locale locale) { + throw unexpected(); + } + + @Override + public NumberFormat getPercentInstance(Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyCalendarDataProvider extends CalendarDataProvider { + + @Override + public int getFirstDayOfWeek(Locale locale) { + throw unexpected(); + } + + @Override + public int getMinimalDaysInFirstWeek(Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyCalendarNameProvider extends CalendarNameProvider { + + @Override + public String getDisplayName(String calendarType, int field, int value, int style, Locale locale) { + throw unexpected(); + } + + @Override + public Map getDisplayNames(String calendarType, int field, int style, Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyCurrencyNameProvider extends CurrencyNameProvider { + + @Override + public String getSymbol(String currencyCode, Locale locale) { + throw 
unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyLocaleNameProvider extends LocaleNameProvider { + + @Override + public String getDisplayLanguage(String languageCode, Locale locale) { + throw unexpected(); + } + + @Override + public String getDisplayCountry(String countryCode, Locale locale) { + throw unexpected(); + } + + @Override + public String getDisplayVariant(String variant, Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyTimeZoneNameProvider extends TimeZoneNameProvider { + + @Override + public String getDisplayName(String ID, boolean daylight, int style, Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyHttpsURLConnection extends HttpsURLConnection { + DummyHttpsURLConnection() { + super(null); + } + + @Override + public void connect() { + throw unexpected(); + } + + @Override + public void disconnect() { + throw unexpected(); + } + + @Override + public boolean usingProxy() { + throw unexpected(); + } + + @Override + public String getCipherSuite() { + throw unexpected(); + } + + @Override + public Certificate[] getLocalCertificates() { + throw unexpected(); + } + + @Override + public Certificate[] getServerCertificates() { + throw unexpected(); + } + } + + static class DummySSLSocketFactory extends SSLSocketFactory { + @Override + public Socket createSocket(String host, int port) { + throw unexpected(); + } + + @Override + public Socket createSocket(String host, int port, InetAddress localHost, int localPort) { + throw unexpected(); + } + + @Override + public Socket createSocket(InetAddress host, int port) { + throw unexpected(); + } + + @Override + public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort) { + throw unexpected(); + } + + @Override + public String[] getDefaultCipherSuites() { + throw unexpected(); + } + + @Override + public String[] getSupportedCipherSuites() { + throw unexpected(); + } + + @Override + public Socket createSocket(Socket s, String host, int port, boolean autoClose) { + throw unexpected(); + } + } + + private static RuntimeException unexpected() { + return new IllegalStateException("This method isn't supposed to be called"); + } + +} diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java index 4afceedbe3f01..9869af4d85251 100644 --- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java @@ -12,6 +12,18 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyBreakIteratorProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCalendarDataProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCalendarNameProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCollatorProvider; +import 
org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCurrencyNameProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyDateFormatProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyDateFormatSymbolsProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyDecimalFormatSymbolsProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyLocaleNameProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyLocaleServiceProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyNumberFormatProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyTimeZoneNameProvider; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.rest.BaseRestHandler; @@ -21,8 +33,15 @@ import java.io.IOException; import java.io.UncheckedIOException; +import java.net.DatagramSocket; +import java.net.DatagramSocketImpl; +import java.net.DatagramSocketImplFactory; +import java.net.HttpURLConnection; +import java.net.ServerSocket; +import java.net.Socket; import java.net.URL; import java.net.URLClassLoader; +import java.net.URLConnection; import java.security.NoSuchAlgorithmException; import java.util.List; import java.util.Map; @@ -40,6 +59,7 @@ public class RestEntitlementsCheckAction extends BaseRestHandler { private static final Logger logger = LogManager.getLogger(RestEntitlementsCheckAction.class); + public static final Thread NO_OP_SHUTDOWN_HOOK = new Thread(() -> {}, "Shutdown hook for testing"); private final String prefix; record CheckAction(Runnable action, boolean isAlwaysDeniedToPlugins) { @@ -63,17 +83,52 @@ static CheckAction alwaysDenied(Runnable action) { private static final Map checkActions = Map.ofEntries( entry("runtime_exit", deniedToPlugins(RestEntitlementsCheckAction::runtimeExit)), entry("runtime_halt", deniedToPlugins(RestEntitlementsCheckAction::runtimeHalt)), + entry("system_exit", deniedToPlugins(RestEntitlementsCheckAction::systemExit)), entry("create_classloader", forPlugins(RestEntitlementsCheckAction::createClassLoader)), entry("processBuilder_start", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_start)), entry("processBuilder_startPipeline", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_startPipeline)), entry("set_https_connection_properties", forPlugins(RestEntitlementsCheckAction::setHttpsConnectionProperties)), entry("set_default_ssl_socket_factory", alwaysDenied(RestEntitlementsCheckAction::setDefaultSSLSocketFactory)), entry("set_default_hostname_verifier", alwaysDenied(RestEntitlementsCheckAction::setDefaultHostnameVerifier)), - entry("set_default_ssl_context", alwaysDenied(RestEntitlementsCheckAction::setDefaultSSLContext)) + entry("set_default_ssl_context", alwaysDenied(RestEntitlementsCheckAction::setDefaultSSLContext)), + entry("system_setIn", alwaysDenied(RestEntitlementsCheckAction::system$$setIn)), + entry("system_setOut", alwaysDenied(RestEntitlementsCheckAction::system$$setOut)), + entry("system_setErr", alwaysDenied(RestEntitlementsCheckAction::system$$setErr)), + entry("runtime_addShutdownHook", alwaysDenied(RestEntitlementsCheckAction::runtime$addShutdownHook)), + entry("runtime_removeShutdownHook", alwaysDenied(RestEntitlementsCheckAction::runtime$$removeShutdownHook)), + entry( + "thread_setDefaultUncaughtExceptionHandler", + 
alwaysDenied(RestEntitlementsCheckAction::thread$$setDefaultUncaughtExceptionHandler) + ), + entry("localeServiceProvider", alwaysDenied(RestEntitlementsCheckAction::localeServiceProvider$)), + entry("breakIteratorProvider", alwaysDenied(RestEntitlementsCheckAction::breakIteratorProvider$)), + entry("collatorProvider", alwaysDenied(RestEntitlementsCheckAction::collatorProvider$)), + entry("dateFormatProvider", alwaysDenied(RestEntitlementsCheckAction::dateFormatProvider$)), + entry("dateFormatSymbolsProvider", alwaysDenied(RestEntitlementsCheckAction::dateFormatSymbolsProvider$)), + entry("decimalFormatSymbolsProvider", alwaysDenied(RestEntitlementsCheckAction::decimalFormatSymbolsProvider$)), + entry("numberFormatProvider", alwaysDenied(RestEntitlementsCheckAction::numberFormatProvider$)), + entry("calendarDataProvider", alwaysDenied(RestEntitlementsCheckAction::calendarDataProvider$)), + entry("calendarNameProvider", alwaysDenied(RestEntitlementsCheckAction::calendarNameProvider$)), + entry("currencyNameProvider", alwaysDenied(RestEntitlementsCheckAction::currencyNameProvider$)), + entry("localeNameProvider", alwaysDenied(RestEntitlementsCheckAction::localeNameProvider$)), + entry("timeZoneNameProvider", alwaysDenied(RestEntitlementsCheckAction::timeZoneNameProvider$)), + entry("logManager", alwaysDenied(RestEntitlementsCheckAction::logManager$)), + + // This group is a bit nasty: if entitlements don't prevent these, then networking is + // irreparably borked for the remainder of the test run. + entry( + "datagramSocket_setDatagramSocketImplFactory", + alwaysDenied(RestEntitlementsCheckAction::datagramSocket$$setDatagramSocketImplFactory) + ), + entry("httpURLConnection_setFollowRedirects", alwaysDenied(RestEntitlementsCheckAction::httpURLConnection$$setFollowRedirects)), + entry("serverSocket_setSocketFactory", alwaysDenied(RestEntitlementsCheckAction::serverSocket$$setSocketFactory)), + entry("socket_setSocketImplFactory", alwaysDenied(RestEntitlementsCheckAction::socket$$setSocketImplFactory)), + entry("url_setURLStreamHandlerFactory", alwaysDenied(RestEntitlementsCheckAction::url$$setURLStreamHandlerFactory)), + entry("urlConnection_setFileNameMap", alwaysDenied(RestEntitlementsCheckAction::urlConnection$$setFileNameMap)), + entry("urlConnection_setContentHandlerFactory", alwaysDenied(RestEntitlementsCheckAction::urlConnection$$setContentHandlerFactory)) ); private static void setDefaultSSLContext() { - logger.info("Calling SSLContext.setDefault"); try { SSLContext.setDefault(SSLContext.getDefault()); } catch (NoSuchAlgorithmException e) { @@ -82,13 +137,11 @@ private static void setDefaultSSLContext() { } private static void setDefaultHostnameVerifier() { - logger.info("Calling HttpsURLConnection.setDefaultHostnameVerifier"); HttpsURLConnection.setDefaultHostnameVerifier((hostname, session) -> false); } private static void setDefaultSSLSocketFactory() { - logger.info("Calling HttpsURLConnection.setDefaultSSLSocketFactory"); - HttpsURLConnection.setDefaultSSLSocketFactory(new TestSSLSocketFactory()); + HttpsURLConnection.setDefaultSSLSocketFactory(new DummyImplementations.DummySSLSocketFactory()); } @SuppressForbidden(reason = "Specifically testing Runtime.exit") @@ -101,6 +154,11 @@ private static void runtimeHalt() { Runtime.getRuntime().halt(123); } + @SuppressForbidden(reason = "Specifically testing System.exit") + private static void systemExit() { + System.exit(123); + } + private static void createClassLoader() { try (var classLoader = new URLClassLoader("test", new URL[0], 
RestEntitlementsCheckAction.class.getClassLoader())) { logger.info("Created URLClassLoader [{}]", classLoader.getName()); @@ -126,9 +184,137 @@ private static void processBuilder_startPipeline() { } private static void setHttpsConnectionProperties() { - logger.info("Calling setSSLSocketFactory"); - var connection = new TestHttpsURLConnection(); - connection.setSSLSocketFactory(new TestSSLSocketFactory()); + new DummyImplementations.DummyHttpsURLConnection().setSSLSocketFactory(new DummyImplementations.DummySSLSocketFactory()); + } + + private static void system$$setIn() { + System.setIn(System.in); + } + + @SuppressForbidden(reason = "This should be a no-op so we don't interfere with system streams") + private static void system$$setOut() { + System.setOut(System.out); + } + + @SuppressForbidden(reason = "This should be a no-op so we don't interfere with system streams") + private static void system$$setErr() { + System.setErr(System.err); + } + + private static void runtime$addShutdownHook() { + Runtime.getRuntime().addShutdownHook(NO_OP_SHUTDOWN_HOOK); + } + + private static void runtime$$removeShutdownHook() { + Runtime.getRuntime().removeShutdownHook(NO_OP_SHUTDOWN_HOOK); + } + + private static void thread$$setDefaultUncaughtExceptionHandler() { + Thread.setDefaultUncaughtExceptionHandler(Thread.getDefaultUncaughtExceptionHandler()); + } + + private static void localeServiceProvider$() { + new DummyLocaleServiceProvider(); + } + + private static void breakIteratorProvider$() { + new DummyBreakIteratorProvider(); + } + + private static void collatorProvider$() { + new DummyCollatorProvider(); + } + + private static void dateFormatProvider$() { + new DummyDateFormatProvider(); + } + + private static void dateFormatSymbolsProvider$() { + new DummyDateFormatSymbolsProvider(); + } + + private static void decimalFormatSymbolsProvider$() { + new DummyDecimalFormatSymbolsProvider(); + } + + private static void numberFormatProvider$() { + new DummyNumberFormatProvider(); + } + + private static void calendarDataProvider$() { + new DummyCalendarDataProvider(); + } + + private static void calendarNameProvider$() { + new DummyCalendarNameProvider(); + } + + private static void currencyNameProvider$() { + new DummyCurrencyNameProvider(); + } + + private static void localeNameProvider$() { + new DummyLocaleNameProvider(); + } + + private static void timeZoneNameProvider$() { + new DummyTimeZoneNameProvider(); + } + + private static void logManager$() { + new java.util.logging.LogManager() { + }; + } + + @SuppressWarnings("deprecation") + @SuppressForbidden(reason = "We're required to prevent calls to this forbidden API") + private static void datagramSocket$$setDatagramSocketImplFactory() { + try { + DatagramSocket.setDatagramSocketImplFactory(new DatagramSocketImplFactory() { + @Override + public DatagramSocketImpl createDatagramSocketImpl() { + throw new IllegalStateException(); + } + }); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + private static void httpURLConnection$$setFollowRedirects() { + HttpURLConnection.setFollowRedirects(HttpURLConnection.getFollowRedirects()); + } + + @SuppressWarnings("deprecation") + @SuppressForbidden(reason = "We're required to prevent calls to this forbidden API") + private static void serverSocket$$setSocketFactory() { + try { + ServerSocket.setSocketFactory(() -> { throw new IllegalStateException(); }); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + @SuppressWarnings("deprecation") + 
@SuppressForbidden(reason = "We're required to prevent calls to this forbidden API") + private static void socket$$setSocketImplFactory() { + try { + Socket.setSocketImplFactory(() -> { throw new IllegalStateException(); }); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + private static void url$$setURLStreamHandlerFactory() { + URL.setURLStreamHandlerFactory(__ -> { throw new IllegalStateException(); }); + } + + private static void urlConnection$$setFileNameMap() { + URLConnection.setFileNameMap(__ -> { throw new IllegalStateException(); }); + } + + private static void urlConnection$$setContentHandlerFactory() { + URLConnection.setContentHandlerFactory(__ -> { throw new IllegalStateException(); }); } public RestEntitlementsCheckAction(String prefix) { diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/TestHttpsURLConnection.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/TestHttpsURLConnection.java deleted file mode 100644 index 5a96e582db02b..0000000000000 --- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/TestHttpsURLConnection.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.entitlement.qa.common; - -import java.io.IOException; -import java.security.cert.Certificate; - -import javax.net.ssl.HttpsURLConnection; -import javax.net.ssl.SSLPeerUnverifiedException; - -class TestHttpsURLConnection extends HttpsURLConnection { - TestHttpsURLConnection() { - super(null); - } - - @Override - public void connect() throws IOException {} - - @Override - public void disconnect() {} - - @Override - public boolean usingProxy() { - return false; - } - - @Override - public String getCipherSuite() { - return ""; - } - - @Override - public Certificate[] getLocalCertificates() { - return new Certificate[0]; - } - - @Override - public Certificate[] getServerCertificates() throws SSLPeerUnverifiedException { - return new Certificate[0]; - } -} diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/TestSSLSocketFactory.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/TestSSLSocketFactory.java deleted file mode 100644 index feb19df780175..0000000000000 --- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/TestSSLSocketFactory.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.entitlement.qa.common; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.Socket; -import java.net.UnknownHostException; - -import javax.net.ssl.SSLSocketFactory; - -class TestSSLSocketFactory extends SSLSocketFactory { - @Override - public Socket createSocket(String host, int port) throws IOException, UnknownHostException { - return null; - } - - @Override - public Socket createSocket(String host, int port, InetAddress localHost, int localPort) { - return null; - } - - @Override - public Socket createSocket(InetAddress host, int port) throws IOException { - return null; - } - - @Override - public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort) throws IOException { - return null; - } - - @Override - public String[] getDefaultCipherSuites() { - return new String[0]; - } - - @Override - public String[] getSupportedCipherSuites() { - return new String[0]; - } - - @Override - public Socket createSocket(Socket s, String host, int port, boolean autoClose) throws IOException { - return null; - } -} diff --git a/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java index b17a57512cde0..e2e5a3c4c61e6 100644 --- a/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java +++ b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java @@ -32,7 +32,7 @@ public class EntitlementsDeniedIT extends ESRestTestCase { .systemProperty("es.entitlements.enabled", "true") .setting("xpack.security.enabled", "false") // Logs in libs/entitlement/qa/build/test-results/javaRestTest/TEST-org.elasticsearch.entitlement.qa.EntitlementsDeniedIT.xml - .setting("logger.org.elasticsearch.entitlement", "TRACE") + // .setting("logger.org.elasticsearch.entitlement", "DEBUG") .build(); @Override diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java index 450786ee57d86..686fb73e10bc2 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java @@ -12,6 +12,13 @@ import org.elasticsearch.entitlement.bridge.EntitlementChecker; import org.elasticsearch.entitlement.runtime.policy.PolicyManager; +import java.io.InputStream; +import java.io.PrintStream; +import java.io.PrintWriter; +import java.net.ContentHandlerFactory; +import java.net.DatagramSocketImplFactory; +import java.net.FileNameMap; +import java.net.SocketImplFactory; import java.net.URL; import java.net.URLStreamHandlerFactory; import java.util.List; @@ -44,6 +51,11 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { policyManager.checkExitVM(callerClass); } + @Override + public void check$java_lang_System$$exit(Class callerClass, int status) { + policyManager.checkExitVM(callerClass); + } + @Override public void check$java_lang_ClassLoader$(Class callerClass) { policyManager.checkCreateClassLoader(callerClass); @@ -115,6 +127,166 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { policyManager.checkStartProcess(callerClass); } + @Override + 
public void check$java_lang_System$$setIn(Class callerClass, InputStream in) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_lang_System$$setOut(Class callerClass, PrintStream out) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_lang_System$$setErr(Class callerClass, PrintStream err) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_lang_Runtime$addShutdownHook(Class callerClass, Runtime runtime, Thread hook) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_lang_Runtime$removeShutdownHook(Class callerClass, Runtime runtime, Thread hook) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$jdk_tools_jlink_internal_Jlink$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$jdk_tools_jlink_internal_Main$$run(Class callerClass, PrintWriter out, PrintWriter err, String... args) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$jdk_vm_ci_services_JVMCIServiceLocator$$getProviders(Class callerClass, Class service) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$jdk_vm_ci_services_Services$$load(Class callerClass, Class service) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$jdk_vm_ci_services_Services$$loadSingle(Class callerClass, Class service, boolean required) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$com_sun_tools_jdi_VirtualMachineManagerImpl$$virtualMachineManager(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_lang_Thread$$setDefaultUncaughtExceptionHandler(Class callerClass, Thread.UncaughtExceptionHandler ueh) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_spi_LocaleServiceProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_text_spi_BreakIteratorProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_text_spi_CollatorProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_text_spi_DateFormatProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_text_spi_DateFormatSymbolsProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_text_spi_DecimalFormatSymbolsProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_text_spi_NumberFormatProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_spi_CalendarDataProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_spi_CalendarNameProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_spi_CurrencyNameProvider$(Class callerClass) { + 
policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_spi_LocaleNameProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_spi_TimeZoneNameProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_logging_LogManager$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_DatagramSocket$$setDatagramSocketImplFactory(Class callerClass, DatagramSocketImplFactory fac) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_HttpURLConnection$$setFollowRedirects(Class callerClass, boolean set) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_ServerSocket$$setSocketFactory(Class callerClass, SocketImplFactory fac) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_Socket$$setSocketImplFactory(Class callerClass, SocketImplFactory fac) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_URL$$setURLStreamHandlerFactory(Class callerClass, URLStreamHandlerFactory fac) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_URLConnection$$setFileNameMap(Class callerClass, FileNameMap map) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_URLConnection$$setContentHandlerFactory(Class callerClass, ContentHandlerFactory fac) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + @Override public void check$javax_net_ssl_HttpsURLConnection$setSSLSocketFactory( Class callerClass, @@ -126,16 +298,16 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { @Override public void check$javax_net_ssl_HttpsURLConnection$$setDefaultSSLSocketFactory(Class callerClass, SSLSocketFactory sf) { - policyManager.checkSetGlobalHttpsConnectionProperties(callerClass); + policyManager.checkChangeJVMGlobalState(callerClass); } @Override public void check$javax_net_ssl_HttpsURLConnection$$setDefaultHostnameVerifier(Class callerClass, HostnameVerifier hv) { - policyManager.checkSetGlobalHttpsConnectionProperties(callerClass); + policyManager.checkChangeJVMGlobalState(callerClass); } @Override public void check$javax_net_ssl_SSLContext$$setDefault(Class callerClass, SSLContext context) { - policyManager.checkSetGlobalHttpsConnectionProperties(callerClass); + policyManager.checkChangeJVMGlobalState(callerClass); } } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java index 188ce1d747db6..9c45f2d42f03a 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java @@ -23,12 +23,15 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; import static java.lang.StackWalker.Option.RETAIN_CLASS_REFERENCE; import static java.util.Objects.requireNonNull; +import static 
java.util.function.Predicate.not; import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.toUnmodifiableMap; public class PolicyManager { private static final Logger logger = LogManager.getLogger(PolicyManager.class); @@ -93,13 +96,13 @@ public PolicyManager( this.agentEntitlements = agentEntitlements; this.pluginsEntitlements = requireNonNull(pluginPolicies).entrySet() .stream() - .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, e -> buildScopeEntitlementsMap(e.getValue()))); + .collect(toUnmodifiableMap(Map.Entry::getKey, e -> buildScopeEntitlementsMap(e.getValue()))); this.pluginResolver = pluginResolver; this.entitlementsModule = entitlementsModule; } private static Map> buildScopeEntitlementsMap(Policy policy) { - return policy.scopes().stream().collect(Collectors.toUnmodifiableMap(scope -> scope.moduleName(), scope -> scope.entitlements())); + return policy.scopes().stream().collect(toUnmodifiableMap(Scope::moduleName, Scope::entitlements)); } public void checkStartProcess(Class callerClass) { @@ -122,6 +125,26 @@ private void neverEntitled(Class callerClass, String operationDescription) { ); } + /** + * @param operationDescription is only called when the operation is not trivially allowed, meaning the check is about to fail; + * therefore, its performance is not a major concern. + */ + private void neverEntitled(Class callerClass, Supplier operationDescription) { + var requestingModule = requestingClass(callerClass); + if (isTriviallyAllowed(requestingModule)) { + return; + } + + throw new NotEntitledException( + Strings.format( + "Not entitled: caller [%s], module [%s], operation [%s]", + callerClass, + requestingModule.getName(), + operationDescription.get() + ) + ); + } + public void checkExitVM(Class callerClass) { checkEntitlementPresent(callerClass, ExitVMEntitlement.class); } @@ -134,8 +157,23 @@ public void checkSetHttpsConnectionProperties(Class callerClass) { checkEntitlementPresent(callerClass, SetHttpsConnectionPropertiesEntitlement.class); } - public void checkSetGlobalHttpsConnectionProperties(Class callerClass) { - neverEntitled(callerClass, "set global https connection properties"); + public void checkChangeJVMGlobalState(Class callerClass) { + neverEntitled(callerClass, () -> { + // Look up the check$ method to compose an informative error message. + // This way, we don't need to painstakingly describe every individual global-state change. + Optional checkMethodName = StackWalker.getInstance() + .walk( + frames -> frames.map(StackFrame::getMethodName) + .dropWhile(not(methodName -> methodName.startsWith("check$"))) + .findFirst() + ); + return checkMethodName.map(this::operationDescription).orElse("change JVM global state"); + }); + } + + private String operationDescription(String methodName) { + // TODO: Use a more human-readable description. 
Perhaps share code with InstrumentationServiceImpl.parseCheckerMethodName + return methodName.substring(methodName.indexOf('$')); } private void checkEntitlementPresent(Class callerClass, Class entitlementClass) { diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java index 99b2a4510bf93..f66683a787bc0 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java @@ -27,7 +27,7 @@ import java.util.Set; import java.util.function.Function; -import static org.elasticsearch.common.settings.Setting.Property.DeprecatedWarning; +import static org.elasticsearch.common.settings.Setting.Property.Deprecated; import static org.elasticsearch.common.settings.Setting.Property.NodeScope; import static org.elasticsearch.common.settings.Setting.Property.OperatorDynamic; @@ -250,7 +250,7 @@ private static Setting concreteAgentSetting(String namespace, String qua TELEMETRY_SETTING_PREFIX + "agent.", LEGACY_TRACING_APM_SETTING_PREFIX + "agent.", (namespace, qualifiedKey) -> qualifiedKey.startsWith(LEGACY_TRACING_APM_SETTING_PREFIX) - ? concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic, DeprecatedWarning) + ? concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic, Deprecated) : concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic) ); @@ -262,7 +262,7 @@ private static Setting concreteAgentSetting(String namespace, String qua LEGACY_TRACING_APM_SETTING_PREFIX + "names.include", OperatorDynamic, NodeScope, - DeprecatedWarning + Deprecated ); public static final Setting> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.listSetting( @@ -281,7 +281,7 @@ private static Setting concreteAgentSetting(String namespace, String qua LEGACY_TRACING_APM_SETTING_PREFIX + "names.exclude", OperatorDynamic, NodeScope, - DeprecatedWarning + Deprecated ); public static final Setting> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.listSetting( @@ -314,7 +314,7 @@ private static Setting concreteAgentSetting(String namespace, String qua ), OperatorDynamic, NodeScope, - DeprecatedWarning + Deprecated ); public static final Setting> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.listSetting( @@ -334,7 +334,7 @@ private static Setting concreteAgentSetting(String namespace, String qua false, OperatorDynamic, NodeScope, - DeprecatedWarning + Deprecated ); public static final Setting TELEMETRY_TRACING_ENABLED_SETTING = Setting.boolSetting( @@ -358,7 +358,7 @@ private static Setting concreteAgentSetting(String namespace, String qua public static final Setting TRACING_APM_SECRET_TOKEN_SETTING = SecureSetting.secureString( LEGACY_TRACING_APM_SETTING_PREFIX + "secret_token", null, - DeprecatedWarning + Deprecated ); public static final Setting TELEMETRY_SECRET_TOKEN_SETTING = SecureSetting.secureString( @@ -373,7 +373,7 @@ private static Setting concreteAgentSetting(String namespace, String qua public static final Setting TRACING_APM_API_KEY_SETTING = SecureSetting.secureString( LEGACY_TRACING_APM_SETTING_PREFIX + "api_key", null, - DeprecatedWarning + Deprecated ); public static final Setting TELEMETRY_API_KEY_SETTING = SecureSetting.secureString( diff --git a/modules/data-streams/build.gradle b/modules/data-streams/build.gradle index 97a5fabd79f4c..60bc8d1dc6a92 100644 --- a/modules/data-streams/build.gradle 
+++ b/modules/data-streams/build.gradle @@ -20,6 +20,7 @@ restResources { dependencies { testImplementation project(path: ':test:test-clusters') + testImplementation project(":modules:mapper-extras") internalClusterTestImplementation project(":modules:mapper-extras") } @@ -70,4 +71,16 @@ tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("data_stream/200_rollover_failure_store/Lazily roll over a data stream's failure store after a shard failure", "Configuring the failure store via data stream templates is not supported anymore.") task.skipTest("data_stream/200_rollover_failure_store/Don't roll over a data stream's failure store when conditions aren't met", "Configuring the failure store via data stream templates is not supported anymore.") task.skipTest("data_stream/200_rollover_failure_store/Roll over a data stream's failure store with conditions", "Configuring the failure store via data stream templates is not supported anymore.") + + task.skipTest("data_stream/200_rollover_failure_store/Rolling over a failure store on a data stream without the failure store enabled should work", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/200_rollover_failure_store/Rolling over an uninitialized failure store should initialize it", "Rolling over a data stream using target_failure_store is no longer supported.") + + task.skipTest("data_stream/210_rollover_failure_store/A failure store marked for lazy rollover should only be rolled over when there is a failure", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Don't roll over a data stream's failure store when conditions aren't met", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Rolling over a failure store on a data stream without the failure store enabled should work", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Rolling over an uninitialized failure store should initialize it", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Roll over a data stream's failure store with conditions", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Lazily roll over a data stream's failure store after an ingest failure", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Lazily roll over a data stream's failure store after a shard failure", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Roll over a data stream's failure store without conditions", "Rolling over a data stream using target_failure_store is no longer supported.") }) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java index ac73385a97d70..91f18ad3573fd 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java +++ 
b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java @@ -493,7 +493,7 @@ private static ShardStats getShardStats(IndexMetadata indexMeta, int shardIndex, CommonStats stats = new CommonStats(); stats.docs = new DocsStats(100, 0, randomByteSizeValue().getBytes()); stats.store = new StoreStats(); - stats.indexing = new IndexingStats(new IndexingStats.Stats(1, 1, 1, 1, 1, 1, 1, 1, false, 1, targetWriteLoad, 1)); + stats.indexing = new IndexingStats(new IndexingStats.Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, false, 1, targetWriteLoad, 1)); return new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null, null, null, false, 0); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index 32d080ccc46b1..ac828630b0463 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -31,11 +31,13 @@ import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.DeleteDataStreamAction; import org.elasticsearch.action.datastreams.GetDataStreamAction; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamAlias; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.index.Index; @@ -136,10 +138,7 @@ public void setup() throws Exception { assertTrue(response.isAcknowledged()); // Initialize the failure store. 
- RolloverRequest rolloverRequest = new RolloverRequest("with-fs", null); - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() - ); + RolloverRequest rolloverRequest = new RolloverRequest("with-fs::failures", null); response = client.execute(RolloverAction.INSTANCE, rolloverRequest).get(); assertTrue(response.isAcknowledged()); @@ -345,7 +344,7 @@ public void testFailureStoreSnapshotAndRestore() throws Exception { .cluster() .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) - .setIndices(dataStreamName) + .setIndices(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.ALL_APPLICABLE)) .setIncludeGlobalState(false) .get(); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java index e9eaf7b5faddb..bee3989d20ff0 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -20,11 +20,12 @@ import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.core.Strings; @@ -194,9 +195,9 @@ public void testRejectionFromFailureStore() throws IOException { createDataStream(); // Initialize failure store. - var rolloverRequest = new RolloverRequest(dataStream, null); - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() + var rolloverRequest = new RolloverRequest( + IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES), + null ); var rolloverResponse = client().execute(RolloverAction.INSTANCE, rolloverRequest).actionGet(); var failureStoreIndex = rolloverResponse.getNewIndex(); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java index 482867d072fc2..54e21d5155ed1 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java @@ -60,7 +60,7 @@ public void setup() throws IOException { assertOK(client().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME))); // Initialize the failure store. 
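
The hunks above replace the request-level IndicesOptions.SelectorOptions.FAILURES toggle with a component selector carried in the target expression itself, written either literally (e.g. "with-fs::failures", "::failures/_rollover") or built with IndexNameExpressionResolver.combineSelector. A minimal sketch of the new pattern, using only calls that appear in these hunks; that combineSelector yields the same "::failures"-suffixed form as the literal strings is an assumption drawn from their interchangeable use here:

    import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
    import org.elasticsearch.action.support.IndexComponentSelector;
    import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;

    class FailureStoreRolloverSketch {
        // Builds a rollover request that targets a data stream's failure store.
        static RolloverRequest failureStoreRollover(String dataStream) {
            // The selector is part of the rollover target; no IndicesOptions tweaking needed.
            String target = IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES);
            // Presumably equivalent to the literal "with-fs::failures" form used in the tests above.
            return new RolloverRequest(target, null);
        }
    }

The request is then executed through RolloverAction.INSTANCE exactly as before; only the way the failure-store component is addressed changes.
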
- assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "/_rollover?target_failure_store"))); + assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "::failures/_rollover"))); ensureGreen(DATA_STREAM_NAME); final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME)); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java deleted file mode 100644 index 85b914be30b2c..0000000000000 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.datastreams; - -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.junit.Before; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -/** - * This should be a yaml test, but in order to write one we would need to expose the new parameter in the rest-api-spec. - * We do not want to do that until the feature flag is removed. For this reason, we temporarily, test the affected APIs here. - * Please convert this to a yaml test when the feature flag is removed. - */ -public class FailureStoreQueryParamIT extends DisabledSecurityDataStreamTestCase { - - private static final String DATA_STREAM_NAME = "failure-data-stream"; - private String backingIndex; - private String failureStoreIndex; - - @SuppressWarnings("unchecked") - @Before - public void setup() throws IOException { - Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/ds-template"); - putComposableIndexTemplateRequest.setJsonEntity(""" - { - "index_patterns": ["failure-data-stream"], - "template": { - "settings": { - "number_of_replicas": 0 - }, - "data_stream_options": { - "failure_store": { - "enabled": true - } - } - }, - "data_stream": { - } - } - """); - assertOK(client().performRequest(putComposableIndexTemplateRequest)); - - assertOK(client().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME))); - // Initialize the failure store. 
- assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "/_rollover?target_failure_store"))); - ensureGreen(DATA_STREAM_NAME); - - final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME)); - List dataStreams = (List) entityAsMap(dataStreamResponse).get("data_streams"); - assertThat(dataStreams.size(), is(1)); - Map dataStream = (Map) dataStreams.get(0); - assertThat(dataStream.get("name"), equalTo(DATA_STREAM_NAME)); - List backingIndices = getIndices(dataStream); - assertThat(backingIndices.size(), is(1)); - List failureStore = getFailureStore(dataStream); - assertThat(failureStore.size(), is(1)); - backingIndex = backingIndices.get(0); - failureStoreIndex = failureStore.get(0); - } - - public void testGetIndexApi() throws IOException { - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME)); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=exclude")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=only")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - @SuppressWarnings("unchecked") - public void testGetIndexStatsApi() throws IOException { - { - final Response statsResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_stats")); - Map indices = (Map) entityAsMap(statsResponse).get("indices"); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response statsResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=include") - ); - Map indices = (Map) entityAsMap(statsResponse).get("indices"); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response statsResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=only") - ); - Map indices = (Map) entityAsMap(statsResponse).get("indices"); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - public void testGetIndexSettingsApi() throws IOException { - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_settings")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=include") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new 
Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=only") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - public void testGetIndexMappingApi() throws IOException { - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=include") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=only") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - @SuppressWarnings("unchecked") - public void testPutIndexMappingApi() throws IOException { - { - final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping"); - mappingRequest.setJsonEntity(""" - { - "properties": { - "email": { - "type": "keyword" - } - } - } - """); - assertAcknowledged(client().performRequest(mappingRequest)); - } - { - final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=include"); - mappingRequest.setJsonEntity(""" - { - "properties": { - "email": { - "type": "keyword" - } - } - } - """); - ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest(mappingRequest)); - Map response = entityAsMap(responseException.getResponse()); - assertThat(((Map) response.get("error")).get("reason"), is("failure index not supported")); - } - } - - @SuppressWarnings("unchecked") - private List getFailureStore(Map response) { - var failureStore = (Map) response.get("failure_store"); - return getIndices(failureStore); - - } - - @SuppressWarnings("unchecked") - private List getIndices(Map response) { - List> indices = (List>) response.get("indices"); - return indices.stream().map(index -> index.get("index_name")).toList(); - } -} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java index 1d3b1b676282a..cc5e00d8283ad 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.datastreams.DataStreamsStatsAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -102,10 +103,11 @@ protected ClusterBlockException 
checkRequestBlock( @Override protected String[] resolveConcreteIndexNames(ClusterState clusterState, DataStreamsStatsAction.Request request) { - return DataStreamsActionUtil.resolveConcreteIndexNames( + return DataStreamsActionUtil.resolveConcreteIndexNamesWithSelector( indexNameExpressionResolver, clusterState, request.indices(), + IndexComponentSelector.ALL_APPLICABLE, request.indicesOptions() ).toArray(String[]::new); } @@ -163,13 +165,17 @@ protected DataStreamsStatsAction.DataStreamShardStats readShardResult(StreamInpu request.indicesOptions(), request.indices() ); - for (String abstractionName : abstractionNames) { - IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName); + for (String abstraction : abstractionNames) { + IndexAbstraction indexAbstraction = indicesLookup.get(abstraction); assert indexAbstraction != null; if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) { DataStream dataStream = (DataStream) indexAbstraction; AggregatedStats stats = aggregatedDataStreamsStats.computeIfAbsent(dataStream.getName(), s -> new AggregatedStats()); - dataStream.getIndices().stream().map(Index::getName).forEach(index -> { + dataStream.getBackingIndices().getIndices().stream().map(Index::getName).forEach(index -> { + stats.backingIndices.add(index); + allBackingIndices.add(index); + }); + dataStream.getFailureIndices().getIndices().stream().map(Index::getName).forEach(index -> { stats.backingIndices.add(index); allBackingIndices.add(index); }); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 7d2828e30d5ab..7de3f180753f8 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -33,7 +33,7 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -49,6 +49,9 @@ import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; @@ -944,11 +947,6 @@ private Set maybeExecuteForceMerge(ClusterState state, List indice if ((configuredFloorSegmentMerge == null || configuredFloorSegmentMerge.equals(targetMergePolicyFloorSegment) == false) || (configuredMergeFactor == null || configuredMergeFactor.equals(targetMergePolicyFactor) == false)) { UpdateSettingsRequest updateMergePolicySettingsRequest = new UpdateSettingsRequest(); - 
updateMergePolicySettingsRequest.indicesOptions( - IndicesOptions.builder(updateMergePolicySettingsRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) - .build() - ); updateMergePolicySettingsRequest.indices(indexName); updateMergePolicySettingsRequest.settings( Settings.builder() @@ -998,8 +996,11 @@ private Set maybeExecuteForceMerge(ClusterState state, List indice private void rolloverDataStream(String writeIndexName, RolloverRequest rolloverRequest, ActionListener listener) { // "saving" the rollover target name here so we don't capture the entire request - String rolloverTarget = rolloverRequest.getRolloverTarget(); - logger.trace("Data stream lifecycle issues rollover request for data stream [{}]", rolloverTarget); + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + logger.trace("Data stream lifecycle issues rollover request for data stream [{}]", rolloverRequest.getRolloverTarget()); client.admin().indices().rolloverIndex(rolloverRequest, new ActionListener<>() { @Override public void onResponse(RolloverResponse rolloverResponse) { @@ -1014,7 +1015,7 @@ public void onResponse(RolloverResponse rolloverResponse) { logger.info( "Data stream lifecycle successfully rolled over datastream [{}] due to the following met rollover " + "conditions {}. The new index is [{}]", - rolloverTarget, + rolloverRequest.getRolloverTarget(), metConditions, rolloverResponse.getNewIndex() ); @@ -1024,7 +1025,7 @@ public void onResponse(RolloverResponse rolloverResponse) { @Override public void onFailure(Exception e) { - DataStream dataStream = clusterService.state().metadata().dataStreams().get(rolloverTarget); + DataStream dataStream = clusterService.state().metadata().dataStreams().get(resolvedRolloverTarget.resource()); if (dataStream == null || dataStream.getWriteIndex().getName().equals(writeIndexName) == false) { // the data stream has another write index so no point in recording an error for the previous write index we were // attempting to roll over @@ -1407,9 +1408,7 @@ static RolloverRequest getDefaultRolloverRequest( ) { RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null).masterNodeTimeout(TimeValue.MAX_VALUE); if (rolloverFailureStore) { - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() - ); + rolloverRequest.setRolloverTarget(IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES)); } rolloverRequest.setConditions(rolloverConfiguration.resolveRolloverConditions(dataRetention)); return rolloverRequest; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java index 1595348649528..7992362d791b1 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java @@ -50,7 +50,7 @@ public static final class Request extends AcknowledgedRequest implement .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(false) + .allowSelectors(false) .build() ) .build(); 
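
In the DataStreamLifecycleService hunk above, the rollover target may now carry a selector suffix, so the service parses it back into a ResolvedExpression and uses resource() to recover the data stream name for its failure handling. A minimal sketch of that parsing step, restricted to the calls visible in this diff:

    import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
    import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression;
    import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver;

    class RolloverTargetSketch {
        // Recovers the plain data stream name from a possibly selector-suffixed rollover target.
        static String dataStreamName(RolloverRequest rolloverRequest) {
            ResolvedExpression resolved = SelectorResolver.parseExpression(
                rolloverRequest.getRolloverTarget(),
                rolloverRequest.indicesOptions()
            );
            // resource() strips the selector (e.g. "::failures"), leaving the name the
            // lifecycle service looks up in cluster state when the rollover fails.
            return resolved.resource();
        }
    }

The allowSelectors(false) gatekeeper flag that replaces allowFailureIndices(false) in the surrounding request classes appears to be the complementary change: those APIs operate on whole data streams, so selector-suffixed expressions are disallowed there rather than resolved.
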
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java index 98a29dd636ddf..860bcb5bf2fbe 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java @@ -39,7 +39,9 @@ public static final class Request extends AcknowledgedRequest implement .wildcardOptions( IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false) ) - .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true)) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false) + ) .build(); public Request(StreamInput in) throws IOException { diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java index c1354da1129ca..45bda1abd5c02 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java @@ -50,7 +50,9 @@ public static class Request extends MasterNodeReadRequest implements In .wildcardOptions( IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false) ) - .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true)) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false) + ) .build(); private boolean includeDefaults = false; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java index d055a6972312a..d66b45665d4e2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java @@ -71,7 +71,9 @@ public static Request parseRequest(XContentParser parser, Factory factory) { .wildcardOptions( IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false) ) - .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true)) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false) + ) .build(); private final DataStreamOptions options; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java index b61e38297397d..be157608b1c3f 100644 --- 
a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.datastreams.GetDataStreamAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.set.Sets; @@ -42,8 +41,7 @@ public class RestGetDataStreamsAction extends BaseRestHandler { IndicesOptions.WildcardOptions.ALLOW_NO_INDICES, IndicesOptions.GatekeeperOptions.IGNORE_THROTTLED, "verbose" - ), - DataStream.isFailureStoreFeatureFlagEnabled() ? Set.of(IndicesOptions.FAILURE_STORE_QUERY_PARAM) : Set.of() + ) ) ); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java index d5c5193948213..e32636fe40d84 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.DataStreamsStatsAction; import org.elasticsearch.action.datastreams.DeleteDataStreamAction; @@ -22,8 +23,12 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStreamFailureStore; +import org.elasticsearch.cluster.metadata.DataStreamOptions; +import org.elasticsearch.cluster.metadata.ResettableValue; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xcontent.json.JsonXContent; @@ -40,12 +45,14 @@ import static java.lang.Math.max; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; public class DataStreamsStatsTests extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return List.of(DataStreamsPlugin.class); + return List.of(DataStreamsPlugin.class, MapperExtrasPlugin.class); } private final Set createdDataStreams = new HashSet<>(); @@ -107,8 +114,30 @@ public void testStatsExistingDataStream() throws Exception { assertEquals(stats.getTotalStoreSize().getBytes(), stats.getDataStreams()[0].getStoreSize().getBytes()); } + public void testStatsExistingDataStreamWithFailureStores() throws Exception { + String dataStreamName = createDataStream(false, true); + createFailedDocument(dataStreamName); + + DataStreamsStatsAction.Response stats 
= getDataStreamsStats(); + + assertEquals(2, stats.getSuccessfulShards()); + assertEquals(0, stats.getFailedShards()); + assertEquals(1, stats.getDataStreamCount()); + assertEquals(2, stats.getBackingIndices()); + assertNotEquals(0L, stats.getTotalStoreSize().getBytes()); + assertEquals(1, stats.getDataStreams().length); + assertEquals(dataStreamName, stats.getDataStreams()[0].getDataStream()); + assertEquals(2, stats.getDataStreams()[0].getBackingIndices()); + // The timestamp is going to not be something we can validate because + // it captures the time of failure which is uncontrolled in the test + // Just make sure it exists by ensuring it isn't zero + assertThat(stats.getDataStreams()[0].getMaximumTimestamp(), is(greaterThan(0L))); + assertNotEquals(0L, stats.getDataStreams()[0].getStoreSize().getBytes()); + assertEquals(stats.getTotalStoreSize().getBytes(), stats.getDataStreams()[0].getStoreSize().getBytes()); + } + public void testStatsExistingHiddenDataStream() throws Exception { - String dataStreamName = createDataStream(true); + String dataStreamName = createDataStream(true, false); long timestamp = createDocument(dataStreamName); DataStreamsStatsAction.Response stats = getDataStreamsStats(true); @@ -221,14 +250,19 @@ public void testStatsMultipleDataStreams() throws Exception { } private String createDataStream() throws Exception { - return createDataStream(false); + return createDataStream(false, false); } - private String createDataStream(boolean hidden) throws Exception { + private String createDataStream(boolean hidden, boolean failureStore) throws Exception { String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.getDefault()); + ResettableValue failureStoreOptions = failureStore == false + ? ResettableValue.undefined() + : ResettableValue.create( + new DataStreamOptions.Template(ResettableValue.create(new DataStreamFailureStore.Template(ResettableValue.create(true)))) + ); Template idxTemplate = new Template(null, new CompressedXContent(""" {"properties":{"@timestamp":{"type":"date"},"data":{"type":"keyword"}}} - """), null); + """), null, null, failureStoreOptions); ComposableIndexTemplate template = ComposableIndexTemplate.builder() .indexPatterns(List.of(dataStreamName + "*")) .template(idxTemplate) @@ -269,6 +303,27 @@ private long createDocument(String dataStreamName) throws Exception { return timestamp; } + private long createFailedDocument(String dataStreamName) throws Exception { + // Get some randomized but reasonable timestamps on the data since not all of it is guaranteed to arrive in order. 
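For the new failure-store stats test, the failure store is switched on through the index template rather than per request. A condensed sketch of the options value built by createDataStream(hidden, failureStore) above; the generic parameter on ResettableValue is my reconstruction, since the flattened diff drops type arguments:

    import org.elasticsearch.cluster.metadata.DataStreamFailureStore;
    import org.elasticsearch.cluster.metadata.DataStreamOptions;
    import org.elasticsearch.cluster.metadata.ResettableValue;

    class FailureStoreTemplateSketch {
        // ResettableValue.undefined() leaves the failure store unset; this nested create(...)
        // chain marks it as explicitly enabled at the template level.
        static ResettableValue<DataStreamOptions.Template> enabledFailureStore() {
            return ResettableValue.create(
                new DataStreamOptions.Template(ResettableValue.create(new DataStreamFailureStore.Template(ResettableValue.create(true))))
            );
        }
    }

The five-argument Template constructor in the hunk then carries this value alongside the mappings.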
+ long timeSeed = System.currentTimeMillis(); + long timestamp = randomLongBetween(timeSeed - TimeUnit.HOURS.toMillis(5), timeSeed); + client().bulk( + new BulkRequest(dataStreamName).add( + new IndexRequest().opType(DocWriteRequest.OpType.CREATE) + .source( + JsonXContent.contentBuilder() + .startObject() + .field("@timestamp", timestamp) + .object("data", b -> b.field("garbage", randomAlphaOfLength(25))) + .endObject() + ) + ) + ).get(); + indicesAdmin().refresh(new RefreshRequest(".fs-" + dataStreamName + "*").indicesOptions(IndicesOptions.lenientExpandOpenHidden())) + .get(); + return timestamp; + } + private DataStreamsStatsAction.Response getDataStreamsStats() throws Exception { return getDataStreamsStats(false); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 698ab427ab040..ac7dabd868a3f 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; @@ -46,6 +46,7 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -225,11 +226,12 @@ public void testOperationsExecutedOnce() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0); assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); - assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES)); + assertThat( + rolloverFailureIndexRequest.getRolloverTarget(), + is(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.FAILURES)) + ); List deleteRequests = clientSeenRequests.subList(2, 5) .stream() .map(transportRequest -> (DeleteIndexRequest) transportRequest) @@ -1546,11 +1548,12 @@ public void testFailureStoreIsManagedEvenWhenDisabled() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0); 
assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); - assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES)); + assertThat( + rolloverFailureIndexRequest.getRolloverTarget(), + is(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.FAILURES)) + ); assertThat( ((DeleteIndexRequest) clientSeenRequests.get(2)).indices()[0], is(dataStream.getFailureIndices().getIndices().get(0).getName()) diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml index 13f79e95d99f4..f439cf59bf2d3 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml @@ -148,8 +148,7 @@ # rollover data stream to create new failure store index - do: indices.rollover: - alias: "data-stream-for-modification" - target_failure_store: true + alias: "data-stream-for-modification::failures" - is_true: acknowledged # save index names for later use diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml index cc3a11ffde5e8..51a1e96b1e937 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml @@ -9,7 +9,7 @@ setup: capabilities: [ 'failure_store_in_template' ] - method: POST path: /{index}/_rollover - capabilities: [ 'lazy-rollover-failure-store' ] + capabilities: [ 'lazy-rollover-failure-store', 'index-expression-selectors' ] - do: allowed_warnings: @@ -58,8 +58,7 @@ teardown: - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } @@ -92,8 +91,7 @@ teardown: - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" body: conditions: max_docs: 1 @@ -130,8 +128,7 @@ teardown: - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" body: conditions: max_primary_shard_docs: 2 @@ -165,8 +162,7 @@ teardown: # Mark the failure store for lazy rollover - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" lazy: true - match: { acknowledged: true } @@ -263,8 +259,7 @@ teardown: # Mark the failure store for lazy rollover - do: indices.rollover: - alias: data-stream-for-lazy-rollover - target_failure_store: true + alias: 
data-stream-for-lazy-rollover::failures lazy: true - match: { acknowledged: true } @@ -332,8 +327,7 @@ teardown: # Mark the failure store for lazy rollover - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" lazy: true - match: { acknowledged: true } @@ -377,16 +371,14 @@ teardown: - do: catch: /Rolling over\/initializing an empty failure store is only supported without conditions\./ indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" body: conditions: max_docs: 1 - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "_none_" } @@ -424,8 +416,7 @@ teardown: # Initializing should work - do: indices.rollover: - alias: "other-data-stream-for-rollover" - target_failure_store: true + alias: "other-data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "_none_" } @@ -448,8 +439,7 @@ teardown: # And "regular" rollover should work - do: indices.rollover: - alias: "other-data-stream-for-rollover" - target_failure_store: true + alias: "other-data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "/\\.fs-other-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java index 990a92fb0dbf8..19069c876e7e7 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.StreamsUtils; @@ -102,7 +101,7 @@ public void testMultiSearchTemplateToJson() throws Exception { String[] indices = { "test" }; SearchRequest searchRequest = new SearchRequest(indices); // scroll is not supported in the current msearch or msearchtemplate api, so unset it: - searchRequest.scroll((Scroll) null); + searchRequest.scroll(null); // batched reduce size is currently not set-able on a per-request basis as it is a query string parameter only searchRequest.setBatchedReduceSize(SearchRequest.DEFAULT_BATCHED_REDUCE_SIZE); SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(searchRequest); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java index fe591387e9b35..de90ff97e6a95 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java @@ -43,7 +43,6 @@ import org.elasticsearch.script.Metadata; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.Scroll; import 
org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -212,7 +211,7 @@ static > SearchRequest prep if (mainRequest.getMaxDocs() != MAX_DOCS_ALL_MATCHES && mainRequest.getMaxDocs() <= preparedSearchRequest.source().size() && mainRequest.isAbortOnVersionConflict()) { - preparedSearchRequest.scroll((Scroll) null); + preparedSearchRequest.scroll(null); } return preparedSearchRequest; diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteRequestBuilders.java index ddf73e313e830..bb2073849edc3 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteRequestBuilders.java @@ -53,7 +53,7 @@ static Request initialSearch(SearchRequest searchRequest, BytesReference query, Request request = new Request("POST", path.toString()); if (searchRequest.scroll() != null) { - TimeValue keepAlive = searchRequest.scroll().keepAlive(); + TimeValue keepAlive = searchRequest.scroll(); // V_5_0_0 if (remoteVersion.before(Version.fromId(5000099))) { /* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java index 5c3db5aaa6cda..cc08357aa4081 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java @@ -100,7 +100,7 @@ private void onStartResponse(RejectAwareActionListener searchListener, @Override protected void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, RejectAwareActionListener searchListener) { - TimeValue keepAlive = timeValueNanos(searchRequest.scroll().keepAlive().nanos() + extraKeepAlive.nanos()); + TimeValue keepAlive = timeValueNanos(searchRequest.scroll().nanos() + extraKeepAlive.nanos()); execute(RemoteRequestBuilders.scroll(scrollId, keepAlive, remoteVersion), RESPONSE_PARSER, searchListener); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java index 26b35a5bbd4b7..28f2eafc20a6e 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java @@ -597,7 +597,7 @@ protected RequestWrapper buildRequest(Hit doc) { capturedCommand.get().run(); // So the next request is going to have to wait an extra 100 seconds or so (base was 10 seconds, so 110ish) - assertThat(client.lastScroll.get().request.scroll().keepAlive().seconds(), either(equalTo(110L)).or(equalTo(109L))); + assertThat(client.lastScroll.get().request.scroll().seconds(), either(equalTo(110L)).or(equalTo(109L))); // Now we can simulate a response and check the delay that we used for the task if (randomBoolean()) { diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java index 1c104cbd08197..5f4e2b3a55156 100644 --- 
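The reindex hunks above all follow from one API change: SearchRequest#scroll now stores the keep-alive as a plain TimeValue, so the Scroll wrapper and the extra .keepAlive() hop disappear. A small sketch of the new accessor pattern, using only calls visible in these hunks; the helper name is illustrative:

    import org.elasticsearch.action.search.SearchRequest;
    import org.elasticsearch.core.TimeValue;

    class ScrollKeepAliveSketch {
        // Previously searchRequest.scroll().keepAlive().nanos(); now the getter is the TimeValue itself.
        static TimeValue extendedKeepAlive(SearchRequest searchRequest, TimeValue extra) {
            TimeValue keepAlive = searchRequest.scroll(); // null when scrolling is disabled, e.g. after scroll(null)
            return keepAlive == null ? extra : TimeValue.timeValueNanos(keepAlive.nanos() + extra.nanos());
        }
    }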
a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java @@ -155,10 +155,7 @@ public void testScrollKeepAlive() { ); hitSource.startNextScroll(timeValueSeconds(100)); - client.validateRequest( - TransportSearchScrollAction.TYPE, - (SearchScrollRequest r) -> assertEquals(r.scroll().keepAlive().seconds(), 110) - ); + client.validateRequest(TransportSearchScrollAction.TYPE, (SearchScrollRequest r) -> assertEquals(r.scroll().seconds(), 110)); } private SearchResponse createSearchResponse() { diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java index 766c3ff695f84..bcc6177f8363c 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java @@ -113,14 +113,20 @@ private static SSLContext buildServerSslContext() throws Exception { } public void testClientFailsWithUntrustedCertificate() throws IOException { - assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); final List threads = new ArrayList<>(); final Settings.Builder builder = Settings.builder().put("path.home", createTempDir()); final Settings settings = builder.build(); final Environment environment = TestEnvironment.newEnvironment(settings); final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class)); try (RestClient client = Reindexer.buildRestClient(getRemoteInfo(), ssl, 1L, threads)) { - expectThrows(SSLHandshakeException.class, () -> client.performRequest(new Request("GET", "/"))); + if (inFipsJvm()) { + // Bouncy Castle throws a different exception + IOException exception = expectThrows(IOException.class, () -> client.performRequest(new Request("GET", "/"))); + assertThat(exception.getCause(), Matchers.instanceOf(javax.net.ssl.SSLException.class)); + } else { + expectThrows(SSLHandshakeException.class, () -> client.performRequest(new Request("GET", "/"))); + + } } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 1a66f5782fc03..a8a6986ccbb7a 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -434,7 +434,7 @@ public void onFileCreated(Path file) { public void onFileChanged(Path file) { if (file.equals(webIdentityTokenFileSymlink)) { LOGGER.debug("WS web identity token file [{}] changed, updating credentials", file); - credentialsProvider.refresh(); + SocketAccess.doPrivilegedVoid(credentialsProvider::refresh); } } }); diff --git a/muted-tests.yml b/muted-tests.yml index 2f89d6244a36a..69112af1ece10 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -84,9 +84,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/115816 - class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests issue: https://github.com/elastic/elasticsearch/issues/116087 -- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT - method: testLookbackWithIndicesOptions - issue: https://github.com/elastic/elasticsearch/issues/116127 - class: 
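On the repository-s3 change just above: refreshing the web identity credentials performs network access, which a SecurityManager checks against the caller's protection domain, so the call is wrapped in the plugin's SocketAccess helper. SocketAccess is internal to the plugin and not shown here; the sketch below uses the plain JDK doPrivileged idiom that such helpers conventionally wrap, and is an assumption rather than the plugin's actual implementation:

    import java.security.AccessController;
    import java.security.PrivilegedAction;

    class PrivilegedRefreshSketch {
        // Runs a void action with the plugin's own permissions instead of the (possibly
        // unprivileged) calling context, mirroring SocketAccess.doPrivilegedVoid(...).
        static void doPrivilegedVoid(Runnable action) {
            AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
                action.run();
                return null;
            });
        }
    }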
org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_start_stop/Test start already started transform} issue: https://github.com/elastic/elasticsearch/issues/98802 @@ -102,9 +99,6 @@ tests: - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testAllocationPreventedForRemoval issue: https://github.com/elastic/elasticsearch/issues/116363 -- class: org.elasticsearch.xpack.downsample.ILMDownsampleDisruptionIT - method: testILMDownsampleRollingRestart - issue: https://github.com/elastic/elasticsearch/issues/114233 - class: org.elasticsearch.reservedstate.service.RepositoriesFileSettingsIT method: testSettingsApplied issue: https://github.com/elastic/elasticsearch/issues/116694 @@ -155,8 +149,6 @@ tests: - class: org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT method: test {p0=search.highlight/50_synthetic_source/text multi unified from vectors} issue: https://github.com/elastic/elasticsearch/issues/117815 -- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT - issue: https://github.com/elastic/elasticsearch/issues/111319 - class: org.elasticsearch.xpack.esql.plugin.ClusterRequestTests method: testFallbackIndicesOptions issue: https://github.com/elastic/elasticsearch/issues/117937 @@ -236,38 +228,50 @@ tests: - class: org.elasticsearch.smoketest.MlWithSecurityIT method: test {yaml=ml/sparse_vector_search/Test sparse_vector search with query vector and pruning config} issue: https://github.com/elastic/elasticsearch/issues/119548 -- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT - method: testSearchableSnapshotUpgrade {p0=[9.0.0, 8.18.0, 8.18.0]} - issue: https://github.com/elastic/elasticsearch/issues/119549 -- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT - method: testMountSearchableSnapshot {p0=[9.0.0, 8.18.0, 8.18.0]} - issue: https://github.com/elastic/elasticsearch/issues/119550 -- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT - method: testMountSearchableSnapshot {p0=[9.0.0, 9.0.0, 8.18.0]} - issue: https://github.com/elastic/elasticsearch/issues/119551 - class: org.elasticsearch.index.engine.LuceneSyntheticSourceChangesSnapshotTests method: testSkipNonRootOfNestedDocuments issue: https://github.com/elastic/elasticsearch/issues/119553 -- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT - method: testSearchableSnapshotUpgrade {p0=[9.0.0, 9.0.0, 8.18.0]} - issue: https://github.com/elastic/elasticsearch/issues/119560 -- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT - method: testMountSearchableSnapshot {p0=[9.0.0, 9.0.0, 9.0.0]} - issue: https://github.com/elastic/elasticsearch/issues/119561 -- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT - method: testSearchableSnapshotUpgrade {p0=[9.0.0, 9.0.0, 9.0.0]} - issue: https://github.com/elastic/elasticsearch/issues/119562 - class: org.elasticsearch.xpack.ml.integration.ForecastIT method: testOverflowToDisk issue: https://github.com/elastic/elasticsearch/issues/117740 - class: org.elasticsearch.xpack.security.authc.ldap.MultiGroupMappingIT issue: https://github.com/elastic/elasticsearch/issues/119599 -- class: org.elasticsearch.lucene.FullClusterRestartSearchableSnapshotIndexCompatibilityIT - method: testSearchableSnapshotUpgrade {p0=8.18.0} - issue: https://github.com/elastic/elasticsearch/issues/119631 -- class: 
org.elasticsearch.lucene.FullClusterRestartSearchableSnapshotIndexCompatibilityIT - method: testSearchableSnapshotUpgrade {p0=9.0.0} - issue: https://github.com/elastic/elasticsearch/issues/119632 +- class: org.elasticsearch.search.profile.dfs.DfsProfilerIT + method: testProfileDfs + issue: https://github.com/elastic/elasticsearch/issues/119711 +- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests + method: testSingleMatchFunctionFilterPushdownWithStringValues {default} + issue: https://github.com/elastic/elasticsearch/issues/119720 +- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests + method: testSingleMatchFunctionPushdownWithCasting {default} + issue: https://github.com/elastic/elasticsearch/issues/119722 +- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests + method: testSingleMatchOperatorFilterPushdownWithStringValues {default} + issue: https://github.com/elastic/elasticsearch/issues/119721 +- class: org.elasticsearch.script.mustache.SearchTemplateRequestTests + method: testConcurrentSerialization + issue: https://github.com/elastic/elasticsearch/issues/119819 +- class: org.elasticsearch.script.mustache.SearchTemplateRequestTests + method: testEqualsAndHashcode + issue: https://github.com/elastic/elasticsearch/issues/119820 +- class: org.elasticsearch.script.mustache.SearchTemplateRequestTests + method: testConcurrentEquals + issue: https://github.com/elastic/elasticsearch/issues/119821 +- class: org.elasticsearch.script.mustache.SearchTemplateRequestTests + method: testSerialization + issue: https://github.com/elastic/elasticsearch/issues/119822 +- class: org.elasticsearch.index.rankeval.RankEvalRequestTests + method: testSerialization + issue: https://github.com/elastic/elasticsearch/issues/119859 +- class: org.elasticsearch.index.rankeval.RankEvalRequestTests + method: testEqualsAndHashcode + issue: https://github.com/elastic/elasticsearch/issues/119860 +- class: org.elasticsearch.index.rankeval.RankEvalRequestTests + method: testConcurrentSerialization + issue: https://github.com/elastic/elasticsearch/issues/119861 +- class: org.elasticsearch.index.rankeval.RankEvalRequestTests + method: testConcurrentEquals + issue: https://github.com/elastic/elasticsearch/issues/119862 # Examples: # diff --git a/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/HdfsClassPatcher.java b/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/HdfsClassPatcher.java index 6636b39445964..732c55929454e 100644 --- a/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/HdfsClassPatcher.java +++ b/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/HdfsClassPatcher.java @@ -27,7 +27,11 @@ public class HdfsClassPatcher { "org/apache/hadoop/util/ShutdownHookManager.class", ShutdownHookManagerPatcher::new, "org/apache/hadoop/util/Shell.class", - ShellPatcher::new + ShellPatcher::new, + "org/apache/hadoop/security/UserGroupInformation.class", + SubjectGetSubjectPatcher::new, + "org/apache/hadoop/security/authentication/client/KerberosAuthenticator.class", + SubjectGetSubjectPatcher::new ); public static void main(String[] args) throws Exception { diff --git a/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/SubjectGetSubjectPatcher.java 
b/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/SubjectGetSubjectPatcher.java new file mode 100644 index 0000000000000..3fb8a23be794d --- /dev/null +++ b/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/SubjectGetSubjectPatcher.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.hdfs.patch; + +import org.objectweb.asm.ClassVisitor; +import org.objectweb.asm.ClassWriter; +import org.objectweb.asm.MethodVisitor; +import org.objectweb.asm.Type; + +import static org.objectweb.asm.Opcodes.ASM9; +import static org.objectweb.asm.Opcodes.INVOKESTATIC; +import static org.objectweb.asm.Opcodes.POP; + +class SubjectGetSubjectPatcher extends ClassVisitor { + SubjectGetSubjectPatcher(ClassWriter classWriter) { + super(ASM9, classWriter); + } + + @Override + public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions) { + return new ReplaceCallMethodVisitor(super.visitMethod(access, name, descriptor, signature, exceptions), name, access, descriptor); + } + + /** + * Replaces calls to Subject.getSubject(context); with calls to Subject.current(); + */ + private static class ReplaceCallMethodVisitor extends MethodVisitor { + private static final String SUBJECT_CLASS_INTERNAL_NAME = "javax/security/auth/Subject"; + private static final String METHOD_NAME = "getSubject"; + + ReplaceCallMethodVisitor(MethodVisitor methodVisitor, String name, int access, String descriptor) { + super(ASM9, methodVisitor); + } + + @Override + public void visitMethodInsn(int opcode, String owner, String name, String descriptor, boolean isInterface) { + if (opcode == INVOKESTATIC && SUBJECT_CLASS_INTERNAL_NAME.equals(owner) && METHOD_NAME.equals(name)) { + // Get rid of the extra arg on the stack + mv.visitInsn(POP); + // Call Subject.current() + mv.visitMethodInsn( + INVOKESTATIC, + SUBJECT_CLASS_INTERNAL_NAME, + "current", + Type.getMethodDescriptor(Type.getObjectType(SUBJECT_CLASS_INTERNAL_NAME)), + false + ); + } else { + super.visitMethodInsn(opcode, owner, name, descriptor, isInterface); + } + } + } +} diff --git a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle index e63b1629db39c..5bbade8cf6fce 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle +++ b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle @@ -50,8 +50,6 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> nonInputProperties.systemProperty('tests.rest.cluster', localCluster.map(c -> c.allHttpSocketURI.join(","))) nonInputProperties.systemProperty('tests.rest.remote_cluster', remoteCluster.map(c -> c.allHttpSocketURI.join(","))) } - - onlyIf("FIPS mode disabled") { buildParams.inFipsJvm == false } } tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { diff --git a/qa/lucene-index-compatibility/build.gradle b/qa/lucene-index-compatibility/build.gradle index 37e5eea85a08b..3b2e69ec9859f 100644 --- a/qa/lucene-index-compatibility/build.gradle +++ 
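SubjectGetSubjectPatcher above is a plain ASM ClassVisitor, so it has to be driven by a ClassReader/ClassWriter pair; that wiring lives in HdfsClassPatcher and is not part of this hunk. A hedged sketch of how the visitor is typically applied to a class file's bytes (the driver class, method name, and writer flags here are illustrative):

    package org.elasticsearch.hdfs.patch;

    import org.objectweb.asm.ClassReader;
    import org.objectweb.asm.ClassWriter;

    class PatcherDriverSketch {
        // Rewrites Subject.getSubject(ctx) call sites to Subject.current() and returns the new bytecode.
        static byte[] patch(byte[] originalClassBytes) {
            ClassReader reader = new ClassReader(originalClassBytes);
            // The replacement pops one argument and calls a no-arg method with the same return type,
            // so stack map frames are unchanged and no recomputation flags are required for this sketch.
            ClassWriter writer = new ClassWriter(reader, 0);
            reader.accept(new SubjectGetSubjectPatcher(writer), 0);
            return writer.toByteArray();
        }
    }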
b/qa/lucene-index-compatibility/build.gradle @@ -14,7 +14,9 @@ buildParams.bwcVersions.withLatestReadOnlyIndexCompatible { bwcVersion -> tasks.named("javaRestTest").configure { systemProperty("tests.minimum.index.compatible", bwcVersion) usesBwcDistribution(bwcVersion) - enabled = true + + // Tests rely on unreleased code in 8.18 branch + enabled = buildParams.isSnapshotBuild() } } @@ -22,4 +24,3 @@ tasks.withType(Test).configureEach { // CI doesn't like it when there's multiple clusters running at once maxParallelForks = 1 } - diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java index 8c9a42dc926e9..13c647983fad5 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java @@ -207,7 +207,7 @@ protected static void updateRandomIndexSettings(String indexName) throws IOExcep switch (i) { case 0 -> settings.putList(IndexSettings.DEFAULT_FIELD_SETTING.getKey(), "field_" + randomInt(2)); case 1 -> settings.put(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), randomIntBetween(1, 100)); - case 2 -> settings.put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), randomLongBetween(0L, 1000L)); + case 2 -> settings.put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), randomLongBetween(100L, 1000L)); case 3 -> settings.put(IndexSettings.MAX_SLICES_PER_SCROLL.getKey(), randomIntBetween(1, 1024)); default -> throw new IllegalStateException(); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json index 2645df28c5d1e..670bb4267bdfa 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json @@ -26,6 +26,13 @@ } } ] + }, + "params": { + "include_deleted": { + "type": "boolean", + "default": false, + "description": "A flag indicating whether to return connectors that have been soft-deleted." + } } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json index 67d2250d3c661..b8c73a09704f1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json @@ -47,6 +47,11 @@ "query": { "type": "string", "description": "A search string for querying connectors, filtering results by matching against connector names, descriptions, and index names" + }, + "include_deleted": { + "type": "boolean", + "default": false, + "description": "A flag indicating whether to return connectors that have been soft-deleted." 
} } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json index 299c24f987d8d..47a1bee665506 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json @@ -63,12 +63,6 @@ "type":"boolean", "default":"false", "description":"If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams." - }, - "target_failure_store":{ - "type":"boolean", - "description":"If set to true, the rollover action will be applied on the failure store of the data stream.", - "visibility": "feature_flag", - "feature_flag": "es.failure_store_feature_flag_enabled" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/license.post_start_trial.json b/rest-api-spec/src/main/resources/rest-api-spec/api/license.post_start_trial.json index 986040d69cb4f..9fb85807d611f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/license.post_start_trial.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/license.post_start_trial.json @@ -31,10 +31,6 @@ "master_timeout": { "type": "time", "description": "Timeout for processing on master node" - }, - "timeout": { - "type": "time", - "description": "Timeout for acknowledgement of update from all nodes in cluster" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.create_from.json b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.create_from.json new file mode 100644 index 0000000000000..e17a69a77b252 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.create_from.json @@ -0,0 +1,37 @@ +{ + "migrate.create_from":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html", + "description":"This API creates a destination from a source index. It copies the mappings and settings from the source index while allowing request settings and mappings to override the source values." 
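The new migrate.create_from spec above describes an experimental, private endpoint; its paths and body are spelled out in the remainder of the spec just below. A hedged example of invoking it with the low-level REST client, using the body field names from the spec; the override values themselves are placeholders, not documented defaults:

    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    import java.io.IOException;

    class CreateFromSketch {
        // PUT /_create_from/{source}/{dest}; the optional body may carry mappings_override and settings_override.
        static Response createFrom(RestClient client, String source, String dest) throws IOException {
            Request request = new Request("PUT", "/_create_from/" + source + "/" + dest);
            request.setJsonEntity("""
                {
                  "settings_override": { "index.number_of_replicas": 0 },
                  "mappings_override": { "properties": { "migrated": { "type": "boolean" } } }
                }
                """);
            return client.performRequest(request);
        }
    }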
+ }, + "stability":"experimental", + "visibility":"private", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_create_from/{source}/{dest}", + "methods":[ "PUT", "POST"], + "parts":{ + "source":{ + "type":"string", + "description":"The source index name" + }, + "dest":{ + "type":"string", + "description":"The destination index name" + } + } + } + ] + }, + "body":{ + "description":"The body contains the fields `mappings_override` and `settings_override`.", + "required":false + } + } +} + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.delete_node.json b/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.delete_node.json index d990d1da1f144..6f1ec484e94d0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.delete_node.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.delete_node.json @@ -26,6 +26,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.put_node.json b/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.put_node.json index bf20cf3b70bac..90b19557f5fb2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.put_node.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.put_node.json @@ -26,7 +26,16 @@ } ] }, - "params":{}, + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + }, "body":{ "description":"The shutdown type definition to register", "required": true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/xpack.info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/xpack.info.json index 68b2a5d2c2c8b..35895f0ddb581 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/xpack.info.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/xpack.info.json @@ -20,6 +20,12 @@ ] }, "params":{ + "human":{ + "type":"boolean", + "required":false, + "description":"Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. The default value is true.", + "default":true + }, "categories":{ "type":"list", "description":"Comma-separated list of info categories. 
Can be any of: build, license, features" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml index 03d8b2068d23e..45f381eab80b1 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -45,6 +45,7 @@ indexing.index_time .+ \n indexing.index_total .+ \n indexing.index_failed .+ \n + indexing.index_failed_due_to_version_conflict .+ \n merges.current .+ \n merges.current_docs .+ \n merges.current_size .+ \n diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index e8e4eb7562462..15b4a557b2b8b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.cluster; -import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -565,7 +564,7 @@ public IndexMetadata randomCreate(String name) { settingsBuilder.put(randomSettings(Settings.EMPTY)).put(IndexMetadata.SETTING_VERSION_CREATED, randomWriteVersion()); builder.settings(settingsBuilder); builder.numberOfShards(randomIntBetween(1, 10)).numberOfReplicas(randomInt(10)); - builder.eventIngestedRange(IndexLongFieldRange.UNKNOWN, TransportVersion.current()); + builder.eventIngestedRange(IndexLongFieldRange.UNKNOWN); int aliasCount = randomInt(10); for (int i = 0; i < aliasCount; i++) { builder.putAlias(randomAlias()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/IndicesMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/IndicesMetricsIT.java index 31c7720ffec1c..fee2c0494365e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/IndicesMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/IndicesMetricsIT.java @@ -30,6 +30,7 @@ import java.io.IOException; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -76,6 +77,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { static final String STANDARD_INDEXING_COUNT = "es.indices.standard.indexing.total"; static final String STANDARD_INDEXING_TIME = "es.indices.standard.indexing.time"; static final String STANDARD_INDEXING_FAILURE = "es.indices.standard.indexing.failure.total"; + static final String STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT = "es.indices.standard.indexing.failure.version_conflict.total"; static final String TIME_SERIES_INDEX_COUNT = "es.indices.time_series.total"; static final String TIME_SERIES_BYTES_SIZE = "es.indices.time_series.size"; @@ -89,6 +91,8 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { static final String TIME_SERIES_INDEXING_COUNT = "es.indices.time_series.indexing.total"; static final String TIME_SERIES_INDEXING_TIME = "es.indices.time_series.indexing.time"; static final String TIME_SERIES_INDEXING_FAILURE = "es.indices.time_series.indexing.failure.total"; + static final String TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT = + 
"es.indices.time_series.indexing.failure.version_conflict.total"; static final String LOGSDB_INDEX_COUNT = "es.indices.logsdb.total"; static final String LOGSDB_BYTES_SIZE = "es.indices.logsdb.size"; @@ -102,6 +106,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { static final String LOGSDB_INDEXING_COUNT = "es.indices.logsdb.indexing.total"; static final String LOGSDB_INDEXING_TIME = "es.indices.logsdb.indexing.time"; static final String LOGSDB_INDEXING_FAILURE = "es.indices.logsdb.indexing.failure.total"; + static final String LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT = "es.indices.logsdb.indexing.failure.version_conflict.total"; public void testIndicesMetrics() { String indexNode = internalCluster().startNode(); @@ -132,7 +137,9 @@ public void testIndicesMetrics() { STANDARD_INDEXING_TIME, greaterThanOrEqualTo(0L), STANDARD_INDEXING_FAILURE, - equalTo(indexing1.getIndexFailedCount() - indexing0.getIndexCount()) + equalTo(indexing1.getIndexFailedCount() - indexing0.getIndexFailedCount()), + STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(indexing1.getIndexFailedDueToVersionConflictCount() - indexing0.getIndexFailedDueToVersionConflictCount()) ) ); @@ -155,7 +162,9 @@ public void testIndicesMetrics() { TIME_SERIES_INDEXING_TIME, greaterThanOrEqualTo(0L), TIME_SERIES_INDEXING_FAILURE, - equalTo(indexing2.getIndexFailedCount() - indexing1.getIndexFailedCount()) + equalTo(indexing1.getIndexFailedCount() - indexing0.getIndexFailedCount()), + TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(indexing1.getIndexFailedDueToVersionConflictCount() - indexing0.getIndexFailedDueToVersionConflictCount()) ) ); @@ -177,13 +186,14 @@ public void testIndicesMetrics() { LOGSDB_INDEXING_TIME, greaterThanOrEqualTo(0L), LOGSDB_INDEXING_FAILURE, - equalTo(indexing3.getIndexFailedCount() - indexing2.getIndexFailedCount()) + equalTo(indexing3.getIndexFailedCount() - indexing2.getIndexFailedCount()), + LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(indexing3.getIndexFailedDueToVersionConflictCount() - indexing2.getIndexFailedDueToVersionConflictCount()) ) ); // already collected indexing stats - collectThenAssertMetrics( - telemetry, - 4, + Map> zeroMatchers = new HashMap<>(); + zeroMatchers.putAll( Map.of( STANDARD_INDEXING_COUNT, equalTo(0L), @@ -191,22 +201,35 @@ public void testIndicesMetrics() { equalTo(0L), STANDARD_INDEXING_FAILURE, equalTo(0L), - + STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(0L) + ) + ); + zeroMatchers.putAll( + Map.of( TIME_SERIES_INDEXING_COUNT, equalTo(0L), TIME_SERIES_INDEXING_TIME, equalTo(0L), TIME_SERIES_INDEXING_FAILURE, equalTo(0L), - + TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(0L) + ) + ); + zeroMatchers.putAll( + Map.of( LOGSDB_INDEXING_COUNT, equalTo(0L), LOGSDB_INDEXING_TIME, equalTo(0L), LOGSDB_INDEXING_FAILURE, + equalTo(0L), + LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, equalTo(0L) ) ); + collectThenAssertMetrics(telemetry, 4, zeroMatchers); String searchNode = internalCluster().startDataOnlyNode(); indicesService = internalCluster().getInstance(IndicesService.class, searchNode); telemetry = internalCluster().getInstance(PluginsService.class, searchNode) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java index 1635a08e1768b..04130d176b9e5 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java @@ -9,14 +9,17 @@ package org.elasticsearch.monitor.metrics; +import org.apache.lucene.analysis.TokenStream; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.bulk.IncrementalBulkService; import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -25,6 +28,13 @@ import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.index.analysis.TokenFilterFactory; +import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.rest.RestStatus; @@ -43,13 +53,16 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; +import static java.util.Collections.singletonMap; import static org.elasticsearch.index.IndexingPressure.MAX_COORDINATING_BYTES; import static org.elasticsearch.index.IndexingPressure.MAX_PRIMARY_BYTES; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) @@ -66,7 +79,7 @@ public List> getSettings() { @Override protected Collection> nodePlugins() { - return List.of(TestTelemetryPlugin.class, TestAPMInternalSettings.class); + return List.of(TestTelemetryPlugin.class, TestAPMInternalSettings.class, TestAnalysisPlugin.class); } @Override @@ -77,6 +90,197 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { .build(); } + public void testZeroMetricsForVersionConflictsForNonIndexingOperations() { + final String dataNode = internalCluster().startNode(); + ensureStableCluster(1); + + final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNode) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + plugin.resetMeter(); + + assertAcked(prepareCreate("index_no_refresh", Settings.builder().put("index.refresh_interval", "-1"))); + assertAcked(prepareCreate("index_with_default_refresh")); + + for (String indexName : 
List.of("index_no_refresh", "index_with_default_refresh")) { + String docId = randomUUID(); + client(dataNode).index(new IndexRequest(indexName).id(docId).source(Map.of())).actionGet(); + // test version conflicts are counted when getting from the translog + if (randomBoolean()) { + // this get has the side effect of tracking translog location in the live version map, + // which potentially influences the engine conflict exception path + client(dataNode).get(new GetRequest(indexName, docId).realtime(randomBoolean())).actionGet(); + } + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).get( + new GetRequest(indexName, docId).version(10).versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + if (randomBoolean()) { + client(dataNode).get(new GetRequest(indexName, docId).realtime(false)).actionGet(); + } + client(dataNode).admin().indices().prepareRefresh(indexName).get(); + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).get( + new GetRequest(indexName, docId).version(5) + .versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)) + .realtime(false) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // updates + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).update( + new UpdateRequest(indexName, docId).setIfPrimaryTerm(1) + .setIfSeqNo(randomIntBetween(2, 5)) + .doc(Map.of(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // deletes + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).delete( + new DeleteRequest(indexName, docId).setIfPrimaryTerm(randomIntBetween(2, 5)).setIfSeqNo(0) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + } + + // simulate async apm `polling` call for metrics + plugin.collect(); + + // there are no indexing (version conflict) failures reported because only gets/updates/deletes generated the conflicts + // and those are not "indexing" operations + var indexingFailedTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.indexing.failed.total"); + assertThat(indexingFailedTotal.getLong(), equalTo(0L)); + var indexingFailedDueToVersionConflictTotal = getSingleRecordedMetric( + plugin::getLongAsyncCounterMeasurement, + "es.indexing.indexing.failed.version_conflict.total" + ); + assertThat(indexingFailedDueToVersionConflictTotal.getLong(), equalTo(0L)); + } + + public void testMetricsForIndexingVersionConflicts() { + final String dataNode = internalCluster().startNode(); + ensureStableCluster(1); + + final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNode) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + plugin.resetMeter(); + + assertAcked( + prepareCreate( + "test", + Settings.builder() + .put("index.refresh_interval", "-1") + .put("index.analysis.analyzer.test_analyzer.type", "custom") + .put("index.analysis.analyzer.test_analyzer.tokenizer", "standard") + 
.putList("index.analysis.analyzer.test_analyzer.filter", "test_token_filter") + ).setMapping(Map.of("properties", Map.of("test_field", Map.of("type", "text", "analyzer", "test_analyzer")))).get() + ); + + String docId = randomUUID(); + // successful index (with version) + client(dataNode).index( + new IndexRequest("test").id(docId) + .version(10) + .versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)) + .source(Map.of()) + ).actionGet(); + // if_primary_term conflict + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).index(new IndexRequest("test").id(docId).source(Map.of()).setIfSeqNo(0).setIfPrimaryTerm(2)) + .actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // if_seq_no conflict + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).index(new IndexRequest("test").id(docId).source(Map.of()).setIfSeqNo(1).setIfPrimaryTerm(1)) + .actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // version conflict + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).index( + new IndexRequest("test").id(docId) + .source(Map.of()) + .version(3) + .versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // indexing failure that is NOT a version conflict + PluginsService pluginService = internalCluster().getInstance(PluginsService.class, dataNode); + pluginService.filterPlugins(TestAnalysisPlugin.class).forEach(p -> p.throwParsingError.set(true)); + { + var e = expectThrows( + MapperParsingException.class, + () -> client(dataNode).index(new IndexRequest("test").id(docId + "other").source(Map.of("test_field", "this will error"))) + .actionGet() + ); + assertThat(e.status(), is(RestStatus.BAD_REQUEST)); + } + + plugin.collect(); + + var indexingFailedTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.indexing.failed.total"); + assertThat(indexingFailedTotal.getLong(), equalTo(4L)); + var indexingFailedDueToVersionConflictTotal = getSingleRecordedMetric( + plugin::getLongAsyncCounterMeasurement, + "es.indexing.indexing.failed.version_conflict.total" + ); + assertThat(indexingFailedDueToVersionConflictTotal.getLong(), equalTo(3L)); + } + + public static final class TestAnalysisPlugin extends Plugin implements AnalysisPlugin { + final AtomicBoolean throwParsingError = new AtomicBoolean(false); + + @Override + public Map> getTokenFilters() { + return singletonMap("test_token_filter", (indexSettings, environment, name, settings) -> new AbstractTokenFilterFactory(name) { + @Override + public TokenStream create(TokenStream tokenStream) { + if (throwParsingError.get()) { + throw new MapperParsingException("simulate mapping parsing error"); + } + return tokenStream; + } + }); + } + } + public void testNodeIndexingMetricsArePublishing() { final String dataNode = internalCluster().startNode(); @@ -116,6 +320,11 @@ public void testNodeIndexingMetricsArePublishing() { var indexingFailedTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.indexing.failed.total"); assertThat(indexingFailedTotal.getLong(), equalTo(0L)); + var indexingFailedDueToVersionConflictTotal = getSingleRecordedMetric( 
+ plugin::getLongAsyncCounterMeasurement, + "es.indexing.indexing.failed.version_conflict.total" + ); + assertThat(indexingFailedDueToVersionConflictTotal.getLong(), equalTo(0L)); var deletionTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.deletion.docs.total"); assertThat(deletionTotal.getLong(), equalTo((long) deletesCount)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java index e079994003751..37e299c816562 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.sort.ShardDocSortField; @@ -97,14 +96,14 @@ public void testSearchSort() throws Exception { int fetchSize = randomIntBetween(10, 100); // test _doc sort SearchRequestBuilder request = prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .setSize(fetchSize) .addSort(SortBuilders.fieldSort("_doc")); assertSearchSlicesWithScroll(request, field, max, numDocs); // test numeric sort request = prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .addSort(SortBuilders.fieldSort("random_int")) .setSize(fetchSize); assertSearchSlicesWithScroll(request, field, max, numDocs); @@ -121,7 +120,7 @@ public void testWithPreferenceAndRoutings() throws Exception { int max = randomIntBetween(2, numShards * 3); int fetchSize = randomIntBetween(10, 100); SearchRequestBuilder request = prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .setSize(fetchSize) .setPreference("_shards:1,4") .addSort(SortBuilders.fieldSort("_doc")); @@ -133,7 +132,7 @@ public void testWithPreferenceAndRoutings() throws Exception { int max = randomIntBetween(2, numShards * 3); int fetchSize = randomIntBetween(10, 100); SearchRequestBuilder request = prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .setSize(fetchSize) .setRouting("foo", "bar") .addSort(SortBuilders.fieldSort("_doc")); @@ -151,7 +150,7 @@ public void testWithPreferenceAndRoutings() throws Exception { int max = randomIntBetween(2, numShards * 3); int fetchSize = randomIntBetween(10, 100); SearchRequestBuilder request = prepareSearch("alias1", "alias3").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .setSize(fetchSize) .addSort(SortBuilders.fieldSort("_doc")); assertSearchSlicesWithScroll(request, "_id", max, numDocs); @@ -176,7 +175,7 @@ private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String f searchResponse.decRef(); searchResponse = client().prepareSearchScroll("test") .setScrollId(scrollId) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + 
.setScroll(TimeValue.timeValueSeconds(10)) .get(); scrollId = searchResponse.getScrollId(); totalResults += searchResponse.getHits().getHits().length; @@ -271,7 +270,7 @@ public void testInvalidFields() throws Exception { SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .slice(new SliceBuilder("invalid_random_int", 0, 10)) ); @@ -282,7 +281,7 @@ public void testInvalidFields() throws Exception { exc = expectThrows( SearchPhaseExecutionException.class, prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .slice(new SliceBuilder("invalid_random_kw", 0, 10)) ); rootCause = findRootCause(exc); diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 22f69edc3a5f8..c5bb47ce1e4f7 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -151,6 +151,10 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_CCS_TELEMETRY_STATS = def(8_816_00_0); public static final TransportVersion TEXT_EMBEDDING_QUERY_VECTOR_BUILDER_INFER_MODEL_ID = def(8_817_00_0); public static final TransportVersion ESQL_ENABLE_NODE_LEVEL_REDUCTION = def(8_818_00_0); + public static final TransportVersion JINA_AI_INTEGRATION_ADDED = def(8_819_00_0); + public static final TransportVersion TRACK_INDEX_FAILED_DUE_TO_VERSION_CONFLICT_METRIC = def(8_820_00_0); + public static final TransportVersion REPLACE_FAILURE_STORE_OPTIONS_WITH_SELECTOR_SYNTAX = def(8_821_00_0); + public static final TransportVersion ELASTIC_INFERENCE_SERVICE_UNIFIED_CHAT_COMPLETIONS_INTEGRATION = def(8_822_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 03e05ca0e4247..24c427c32d69a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -44,9 +43,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 801dbbdee0858..be7aaeec8f69e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.info.ClusterInfoRequest; -import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; @@ -95,13 +94,7 @@ public static Feature[] fromRequest(RestRequest request) { private transient boolean includeDefaults = false; public GetIndexRequest() { - super( - DataStream.isFailureStoreFeatureFlagEnabled() - ? 
IndicesOptions.builder(IndicesOptions.strictExpandOpen()) - .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) - .build() - : IndicesOptions.strictExpandOpen() - ); + super(IndicesOptions.strictExpandOpen()); } public GetIndexRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 7b782c6da5a84..05cc0d2cf05d8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -82,7 +82,7 @@ public class PutMappingRequest extends AcknowledgedRequest im .allowClosedIndices(true) .allowAliasToMultipleIndices(true) .ignoreThrottled(false) - .allowFailureIndices(false) + .allowSelectors(false) ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index 749470e181deb..24f8735b6bd7f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -20,6 +20,8 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataMappingService; import org.elasticsearch.cluster.service.ClusterService; @@ -40,6 +42,7 @@ import java.util.List; import java.util.Objects; import java.util.Optional; +import java.util.SortedMap; /** * Put mapping action. @@ -106,7 +109,14 @@ protected void masterOperation( return; } - final String message = checkForSystemIndexViolations(systemIndices, concreteIndices, request); + String message = checkForFailureStoreViolations(clusterService.state(), concreteIndices, request); + if (message != null) { + logger.warn(message); + listener.onFailure(new IllegalStateException(message)); + return; + } + + message = checkForSystemIndexViolations(systemIndices, concreteIndices, request); if (message != null) { logger.warn(message); listener.onFailure(new IllegalStateException(message)); @@ -172,6 +182,33 @@ static void performMappingUpdate( metadataMappingService.putMapping(updateRequest, wrappedListener); } + static String checkForFailureStoreViolations(ClusterState clusterState, Index[] concreteIndices, PutMappingRequest request) { + // Requests that a cluster generates itself are permitted to make changes to mappings + // so that rolling upgrade scenarios still work. We check this via the request's origin. 
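For context, the guard introduced in this hunk only applies to requests without an origin, so cluster-internal traffic (e.g. during rolling upgrades) is unaffected. A rough, hypothetical test-style sketch of the externally visible behaviour follows; it assumes an ESIntegTestCase-style context, `failureIndexName` is a placeholder for a concrete failure-store index name, and the exception may reach a client wrapped differently than shown.

    // Hypothetical sketch: an external (no-origin) put-mapping request that targets a
    // failure-store index should be rejected by the checkForFailureStoreViolations guard.
    void assertFailureStoreMappingsAreReadOnly(String failureIndexName) {
        Exception e = expectThrows(
            Exception.class,
            () -> indicesAdmin().preparePutMapping(failureIndexName).setSource("new_field", "type=keyword").get()
        );
        assertThat(e.getMessage(), containsString("data stream failure stores cannot be updated"));
    }
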
+ if (Strings.isNullOrEmpty(request.origin()) == false) { + return null; + } + + List violations = new ArrayList<>(); + SortedMap indicesLookup = clusterState.metadata().getIndicesLookup(); + for (Index index : concreteIndices) { + IndexAbstraction indexAbstraction = indicesLookup.get(index.getName()); + if (indexAbstraction != null) { + DataStream maybeDataStream = indexAbstraction.getParentDataStream(); + if (maybeDataStream != null && maybeDataStream.isFailureStoreIndex(index.getName())) { + violations.add(index.getName()); + } + } + } + + if (violations.isEmpty() == false) { + return "Cannot update mappings in " + + violations + + ": mappings for indices contained in data stream failure stores cannot be updated"; + } + return null; + } + static String checkForSystemIndexViolations(SystemIndices systemIndices, Index[] concreteIndices, PutMappingRequest request) { // Requests that a cluster generates itself are permitted to have a difference in mappings // so that rolling upgrade scenarios still work. We check this via the request's origin. diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java index 9c5b6097b11bd..ebc9b0fea1be4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java @@ -9,12 +9,14 @@ package org.elasticsearch.action.admin.indices.resolve; +import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.tasks.CancellableTask; @@ -30,6 +32,7 @@ public class ResolveClusterActionRequest extends ActionRequest implements IndicesRequest.Replaceable { public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpen(); + public static final String TRANSPORT_VERSION_ERROR_MESSAGE_PREFIX = "ResolveClusterAction requires at least version"; private String[] names; /* @@ -65,12 +68,7 @@ public ResolveClusterActionRequest(String[] names, IndicesOptions indicesOptions public ResolveClusterActionRequest(StreamInput in) throws IOException { super(in); if (in.getTransportVersion().before(TransportVersions.V_8_13_0)) { - throw new UnsupportedOperationException( - "ResolveClusterAction requires at least version " - + TransportVersions.V_8_13_0.toReleaseVersion() - + " but was " - + in.getTransportVersion().toReleaseVersion() - ); + throw new UnsupportedOperationException(createVersionErrorMessage(in.getTransportVersion())); } this.names = in.readStringArray(); this.indicesOptions = IndicesOptions.readIndicesOptions(in); @@ -81,17 +79,21 @@ public ResolveClusterActionRequest(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); if (out.getTransportVersion().before(TransportVersions.V_8_13_0)) { - throw new UnsupportedOperationException( - "ResolveClusterAction requires at least version " - + 
TransportVersions.V_8_13_0.toReleaseVersion() - + " but was " - + out.getTransportVersion().toReleaseVersion() - ); + throw new UnsupportedOperationException(createVersionErrorMessage(out.getTransportVersion())); } out.writeStringArray(names); indicesOptions.writeIndicesOptions(out); } + private String createVersionErrorMessage(TransportVersion versionFound) { + return Strings.format( + "%s %s but was %s", + TRANSPORT_VERSION_ERROR_MESSAGE_PREFIX, + TransportVersions.V_8_13_0.toReleaseVersion(), + versionFound.toReleaseVersion() + ); + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index f5c100b7884bb..4aa022aff1c80 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -59,6 +59,7 @@ import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; +import java.util.stream.Stream; import static org.elasticsearch.action.search.TransportSearchHelper.checkCCSVersionCompatibility; @@ -598,12 +599,13 @@ private static void mergeResults( private static void enrichIndexAbstraction( ClusterState clusterState, - ResolvedExpression indexAbstraction, + ResolvedExpression resolvedExpression, List indices, List aliases, List dataStreams ) { - IndexAbstraction ia = clusterState.metadata().getIndicesLookup().get(indexAbstraction.resource()); + SortedMap indicesLookup = clusterState.metadata().getIndicesLookup(); + IndexAbstraction ia = indicesLookup.get(resolvedExpression.resource()); if (ia != null) { switch (ia.getType()) { case CONCRETE_INDEX -> { @@ -632,13 +634,24 @@ private static void enrichIndexAbstraction( ); } case ALIAS -> { - String[] indexNames = ia.getIndices().stream().map(Index::getName).toArray(String[]::new); + String[] indexNames = getAliasIndexStream(resolvedExpression, ia, indicesLookup).map(Index::getName) + .toArray(String[]::new); Arrays.sort(indexNames); aliases.add(new ResolvedAlias(ia.getName(), indexNames)); } case DATA_STREAM -> { DataStream dataStream = (DataStream) ia; - String[] backingIndices = dataStream.getIndices().stream().map(Index::getName).toArray(String[]::new); + Stream dataStreamIndices = resolvedExpression.selector() == null + ? 
dataStream.getIndices().stream() + : switch (resolvedExpression.selector()) { + case DATA -> dataStream.getBackingIndices().getIndices().stream(); + case FAILURES -> dataStream.getFailureIndices().getIndices().stream(); + case ALL_APPLICABLE -> Stream.concat( + dataStream.getBackingIndices().getIndices().stream(), + dataStream.getFailureIndices().getIndices().stream() + ); + }; + String[] backingIndices = dataStreamIndices.map(Index::getName).toArray(String[]::new); dataStreams.add(new ResolvedDataStream(dataStream.getName(), backingIndices, DataStream.TIMESTAMP_FIELD_NAME)); } default -> throw new IllegalStateException("unknown index abstraction type: " + ia.getType()); @@ -646,6 +659,52 @@ private static void enrichIndexAbstraction( } } + private static Stream getAliasIndexStream( + ResolvedExpression resolvedExpression, + IndexAbstraction ia, + SortedMap indicesLookup + ) { + Stream aliasIndices; + if (resolvedExpression.selector() == null) { + aliasIndices = ia.getIndices().stream(); + } else { + aliasIndices = switch (resolvedExpression.selector()) { + case DATA -> ia.getIndices().stream(); + case FAILURES -> { + assert ia.isDataStreamRelated() : "Illegal selector [failures] used on non data stream alias"; + yield ia.getIndices() + .stream() + .map(Index::getName) + .map(indicesLookup::get) + .map(IndexAbstraction::getParentDataStream) + .filter(Objects::nonNull) + .distinct() + .map(DataStream::getFailureIndices) + .flatMap(failureIndices -> failureIndices.getIndices().stream()); + } + case ALL_APPLICABLE -> { + if (ia.isDataStreamRelated()) { + yield Stream.concat( + ia.getIndices().stream(), + ia.getIndices() + .stream() + .map(Index::getName) + .map(indicesLookup::get) + .map(IndexAbstraction::getParentDataStream) + .filter(Objects::nonNull) + .distinct() + .map(DataStream::getFailureIndices) + .flatMap(failureIndices -> failureIndices.getIndices().stream()) + ); + } else { + yield ia.getIndices().stream(); + } + } + }; + } + return aliasIndices; + } + enum Attribute { OPEN, CLOSED, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java index e3e737595cac6..50dbaf33d2e4f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java @@ -51,7 +51,6 @@ public class TransportResolveClusterAction extends HandledTransportAction { private static final Logger logger = LogManager.getLogger(TransportResolveClusterAction.class); - private static final String TRANSPORT_VERSION_ERROR_MESSAGE = "ResolveClusterAction requires at least Transport Version"; public static final String NAME = "indices:admin/resolve/cluster"; public static final ActionType TYPE = new ActionType<>(NAME); @@ -175,7 +174,13 @@ public void onFailure(Exception failure) { failure, ElasticsearchSecurityException.class ) instanceof ElasticsearchSecurityException ese) { - clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(true, skipUnavailable, ese.getMessage())); + /* + * some ElasticsearchSecurityExceptions come from the local cluster security interceptor after you've + * issued the client.execute call but before any call went to the remote cluster, so with an + * ElasticsearchSecurityException you can't tell whether the remote cluster is available or not, so mark + * it as connected=false + */ + 
clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(false, skipUnavailable, ese.getMessage())); } else if (ExceptionsHelper.unwrap(failure, IndexNotFoundException.class) instanceof IndexNotFoundException infe) { clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(true, skipUnavailable, infe.getMessage())); } else { @@ -184,7 +189,7 @@ public void onFailure(Exception failure) { // this error at the Transport layer BEFORE it sends the request to the remote cluster, since there // are version guards on the Writeables for this Action, namely ResolveClusterActionRequest.writeTo if (cause instanceof UnsupportedOperationException - && cause.getMessage().contains(TRANSPORT_VERSION_ERROR_MESSAGE)) { + && cause.getMessage().contains(ResolveClusterActionRequest.TRANSPORT_VERSION_ERROR_MESSAGE_PREFIX)) { // Since this cluster does not have _resolve/cluster, we call the _resolve/index // endpoint to fill in the matching_indices field of the response for that cluster ResolveIndexAction.Request resolveIndexRequest = new ResolveIndexAction.Request( diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java index a677897d79633..7b28acdbd8f84 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java @@ -21,6 +21,8 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; @@ -119,32 +121,38 @@ protected void masterOperation( : "The auto rollover action does not expect any other parameters in the request apart from the data stream name"; Metadata metadata = clusterState.metadata(); - DataStream dataStream = metadata.dataStreams().get(rolloverRequest.getRolloverTarget()); + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean isFailureStoreRollover = resolvedRolloverTarget.selector() != null + && resolvedRolloverTarget.selector().shouldIncludeFailures(); + + DataStream dataStream = metadata.dataStreams().get(resolvedRolloverTarget.resource()); // Skip submitting the task if we detect that the lazy rollover has been already executed. - if (isLazyRolloverNeeded(dataStream, rolloverRequest.targetsFailureStore()) == false) { - DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(rolloverRequest.targetsFailureStore()); + if (isLazyRolloverNeeded(dataStream, isFailureStoreRollover) == false) { + DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(isFailureStoreRollover); listener.onResponse(noopLazyRolloverResponse(targetIndices)); return; } // We evaluate the names of the source index as well as what our newly created index would be. 
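For context, the lazy rollover path now derives the failure-store flag from a selector embedded in the rollover target expression instead of from per-request SelectorOptions. A minimal sketch of that resolution, assuming the `::failures` suffix form used elsewhere in this PR and the types introduced by it:

    // Sketch only: "my-ds::failures" is an assumed example expression.
    ResolvedExpression resolved = SelectorResolver.parseExpression(
        "my-ds::failures",
        IndicesOptions.strictSingleIndexNoExpandForbidClosedAllowSelectors()
    );
    String dataStreamName = resolved.resource();              // "my-ds"
    boolean rollFailureStore = resolved.selector() != null
        && resolved.selector().shouldIncludeFailures();       // true for the failures selector
    String combinedAgain = resolved.combined();               // re-attaches the selector for downstream requests
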
final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( clusterState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), - rolloverRequest.targetsFailureStore() + isFailureStoreRollover ); final String trialSourceIndexName = trialRolloverNames.sourceName(); final String trialRolloverIndexName = trialRolloverNames.rolloverName(); MetadataCreateIndexService.validateIndexName(trialRolloverIndexName, clusterState.metadata(), clusterState.routingTable()); - assert metadata.dataStreams().containsKey(rolloverRequest.getRolloverTarget()) : "Auto-rollover applies only to data streams"; + assert metadata.dataStreams().containsKey(resolvedRolloverTarget.resource()) : "Auto-rollover applies only to data streams"; String source = "lazy_rollover source [" + trialSourceIndexName + "] to target [" + trialRolloverIndexName + "]"; // We create a new rollover request to ensure that it doesn't contain any other parameters apart from the data stream name // This will provide a more resilient user experience - var newRolloverRequest = new RolloverRequest(rolloverRequest.getRolloverTarget(), null); - newRolloverRequest.setIndicesOptions(rolloverRequest.indicesOptions()); + var newRolloverRequest = new RolloverRequest(resolvedRolloverTarget.combined(), null); LazyRolloverTask rolloverTask = new LazyRolloverTask(newRolloverRequest, listener); lazyRolloverTaskQueue.submitTask(source, rolloverTask, rolloverRequest.masterNodeTimeout()); } @@ -223,12 +231,19 @@ public ClusterState executeTask( AllocationActionMultiListener allocationActionMultiListener ) throws Exception { + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean isFailureStoreRollover = resolvedRolloverTarget.selector() != null + && resolvedRolloverTarget.selector().shouldIncludeFailures(); + // If the data stream has been rolled over since it was marked for lazy rollover, this operation is a noop - final DataStream dataStream = currentState.metadata().dataStreams().get(rolloverRequest.getRolloverTarget()); + final DataStream dataStream = currentState.metadata().dataStreams().get(resolvedRolloverTarget.resource()); assert dataStream != null; - if (isLazyRolloverNeeded(dataStream, rolloverRequest.targetsFailureStore()) == false) { - final DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(rolloverRequest.targetsFailureStore()); + if (isLazyRolloverNeeded(dataStream, isFailureStoreRollover) == false) { + final DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(isFailureStoreRollover); var noopResponse = noopLazyRolloverResponse(targetIndices); notifyAllListeners(rolloverTaskContexts, context -> context.getTask().listener.onResponse(noopResponse)); return currentState; @@ -237,7 +252,7 @@ public ClusterState executeTask( // Perform the actual rollover final var rolloverResult = rolloverService.rolloverClusterState( currentState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), List.of(), @@ -246,7 +261,7 @@ public ClusterState executeTask( false, null, null, - rolloverRequest.targetsFailureStore() + isFailureStoreRollover ); results.add(rolloverResult); logger.trace("lazy rollover result [{}]", rolloverResult); diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 552ce727d4249..608d32d50a856 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -16,7 +16,8 @@ import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.MapperService; @@ -81,7 +82,7 @@ public class RolloverRequest extends AcknowledgedRequest implem private RolloverConditions conditions = new RolloverConditions(); // the index name "_na_" is never read back, what matters are settings, mappings and aliases private CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); - private IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + private IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosedAllowSelectors(); public RolloverRequest(StreamInput in) throws IOException { super(in); @@ -125,12 +126,15 @@ public ActionRequestValidationException validate() { ); } - var selector = indicesOptions.selectorOptions().defaultSelector(); - if (selector == IndexComponentSelector.ALL_APPLICABLE) { - validationException = addValidationError( - "rollover cannot be applied to both regular and failure indices at the same time", - validationException - ); + if (rolloverTarget != null) { + ResolvedExpression resolvedExpression = SelectorResolver.parseExpression(rolloverTarget, indicesOptions); + IndexComponentSelector selector = resolvedExpression.selector(); + if (IndexComponentSelector.ALL_APPLICABLE.equals(selector)) { + validationException = addValidationError( + "rollover cannot be applied to both regular and failure indices at the same time", + validationException + ); + } } return validationException; @@ -162,13 +166,6 @@ public IndicesOptions indicesOptions() { return indicesOptions; } - /** - * @return true of the rollover request targets the failure store, false otherwise. 
- */ - public boolean targetsFailureStore() { - return DataStream.isFailureStoreFeatureFlagEnabled() && indicesOptions.includeFailureIndices(); - } - public void setIndicesOptions(IndicesOptions indicesOptions) { this.indicesOptions = indicesOptions; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index c5c874f9bcddf..4f0aa9c5bade4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -36,6 +36,8 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadataStats; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; @@ -149,8 +151,7 @@ protected ClusterBlockException checkBlock(RolloverRequest request, ClusterState .matchOpen(request.indicesOptions().expandWildcardsOpen()) .matchClosed(request.indicesOptions().expandWildcardsClosed()) .build(), - IndicesOptions.GatekeeperOptions.DEFAULT, - request.indicesOptions().selectorOptions() + IndicesOptions.GatekeeperOptions.DEFAULT ); return state.blocks() @@ -170,11 +171,18 @@ protected void masterOperation( assert task instanceof CancellableTask; Metadata metadata = clusterState.metadata(); + + // Parse the rollover request's target since the expression it may contain a selector on it + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean targetFailureStore = resolvedRolloverTarget.selector() != null && resolvedRolloverTarget.selector().shouldIncludeFailures(); + // We evaluate the names of the index for which we should evaluate conditions, as well as what our newly created index *would* be. 
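Related to the RolloverRequest#validate() change earlier in this diff, selector misuse is now caught at request-validation time rather than via IndicesOptions. A sketch of the expected behaviour, assuming `::*` is the textual form of the ALL_APPLICABLE selector:

    // Sketch only: the "::*" spelling of ALL_APPLICABLE is an assumption here.
    RolloverRequest request = new RolloverRequest("my-ds::*", null);
    ActionRequestValidationException e = request.validate();
    // e is expected to be non-null and to contain:
    // "rollover cannot be applied to both regular and failure indices at the same time"
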
- boolean targetFailureStore = rolloverRequest.targetsFailureStore(); final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( clusterState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), targetFailureStore @@ -183,7 +191,7 @@ protected void masterOperation( final String trialRolloverIndexName = trialRolloverNames.rolloverName(); MetadataCreateIndexService.validateIndexName(trialRolloverIndexName, metadata, clusterState.routingTable()); - boolean isDataStream = metadata.dataStreams().containsKey(rolloverRequest.getRolloverTarget()); + boolean isDataStream = metadata.dataStreams().containsKey(resolvedRolloverTarget.resource()); if (rolloverRequest.isLazy()) { if (isDataStream == false || rolloverRequest.getConditions().hasConditions()) { String message; @@ -201,7 +209,7 @@ protected void masterOperation( } if (rolloverRequest.isDryRun() == false) { metadataDataStreamsService.setRolloverOnWrite( - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), true, targetFailureStore, rolloverRequest.ackTimeout(), @@ -225,7 +233,7 @@ protected void masterOperation( final IndexAbstraction rolloverTargetAbstraction = clusterState.metadata() .getIndicesLookup() - .get(rolloverRequest.getRolloverTarget()); + .get(resolvedRolloverTarget.resource()); if (rolloverTargetAbstraction.getType() == IndexAbstraction.Type.ALIAS && rolloverTargetAbstraction.isDataStreamRelated()) { listener.onFailure( new IllegalStateException("Aliases to data streams cannot be rolled over. Please rollover the data stream itself.") @@ -246,10 +254,10 @@ protected void masterOperation( final var statsIndicesOptions = new IndicesOptions( IndicesOptions.ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS, IndicesOptions.WildcardOptions.builder().matchClosed(true).allowEmptyExpressions(false).build(), - IndicesOptions.GatekeeperOptions.DEFAULT, - rolloverRequest.indicesOptions().selectorOptions() + IndicesOptions.GatekeeperOptions.DEFAULT ); - IndicesStatsRequest statsRequest = new IndicesStatsRequest().indices(rolloverRequest.getRolloverTarget()) + // Make sure to recombine any selectors on the stats request + IndicesStatsRequest statsRequest = new IndicesStatsRequest().indices(resolvedRolloverTarget.combined()) .clear() .indicesOptions(statsIndicesOptions) .docs(true) @@ -266,9 +274,7 @@ protected void masterOperation( listener.delegateFailureAndWrap((delegate, statsResponse) -> { AutoShardingResult rolloverAutoSharding = null; - final IndexAbstraction indexAbstraction = clusterState.metadata() - .getIndicesLookup() - .get(rolloverRequest.getRolloverTarget()); + final IndexAbstraction indexAbstraction = clusterState.metadata().getIndicesLookup().get(resolvedRolloverTarget.resource()); if (indexAbstraction.getType().equals(IndexAbstraction.Type.DATA_STREAM)) { DataStream dataStream = (DataStream) indexAbstraction; final Optional indexStats = Optional.ofNullable(statsResponse) @@ -492,14 +498,20 @@ public ClusterState executeTask( ) throws Exception { final var rolloverTask = rolloverTaskContext.getTask(); final var rolloverRequest = rolloverTask.rolloverRequest(); + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean targetFailureStore = resolvedRolloverTarget.selector() != null + && resolvedRolloverTarget.selector().shouldIncludeFailures(); // 
Regenerate the rollover names, as a rollover could have happened in between the pre-check and the cluster state update final var rolloverNames = MetadataRolloverService.resolveRolloverNames( currentState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), - rolloverRequest.targetsFailureStore() + targetFailureStore ); // Re-evaluate the conditions, now with our final source index name @@ -532,7 +544,7 @@ public ClusterState executeTask( final IndexAbstraction rolloverTargetAbstraction = currentState.metadata() .getIndicesLookup() - .get(rolloverRequest.getRolloverTarget()); + .get(resolvedRolloverTarget.resource()); final IndexMetadataStats sourceIndexStats = rolloverTargetAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM ? IndexMetadataStats.fromStatsResponse(rolloverSourceIndex, rolloverTask.statsResponse()) @@ -541,7 +553,7 @@ public ClusterState executeTask( // Perform the actual rollover final var rolloverResult = rolloverService.rolloverClusterState( currentState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), metConditions, @@ -550,7 +562,7 @@ public ClusterState executeTask( false, sourceIndexStats, rolloverTask.autoShardingResult(), - rolloverRequest.targetsFailureStore() + targetFailureStore ); results.add(rolloverResult); logger.trace("rollover result [{}]", rolloverResult); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 5f98852148ed4..d3d557b598b3a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -214,8 +214,7 @@ public static ClusterState resolveTemporaryState( .build(); final IndexMetadata indexMetadata = IndexMetadata.builder(indexName) - // handle mixed-cluster states by passing in minTransportVersion to reset event.ingested range to UNKNOWN if an older version - .eventIngestedRange(getEventIngestedRange(indexName, simulatedState), simulatedState.getMinTransportVersion()) + .eventIngestedRange(getEventIngestedRange(indexName, simulatedState)) .settings(dummySettings) .build(); return ClusterState.builder(simulatedState) @@ -304,8 +303,7 @@ public static Template resolveTemplate( dummySettings.put(templateSettings); final IndexMetadata indexMetadata = IndexMetadata.builder(indexName) - // handle mixed-cluster states by passing in minTransportVersion to reset event.ingested range to UNKNOWN if an older version - .eventIngestedRange(getEventIngestedRange(indexName, simulatedState), simulatedState.getMinTransportVersion()) + .eventIngestedRange(getEventIngestedRange(indexName, simulatedState)) .settings(dummySettings) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index b137809047d18..dd473869fb2d9 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; 
import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.client.internal.node.NodeClient; @@ -216,11 +216,9 @@ private void rollOverFailureStores(Runnable runnable) { } try (RefCountingRunnable refs = new RefCountingRunnable(runnable)) { for (String dataStream : failureStoresToBeRolledOver) { - RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null); - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) - .build() + RolloverRequest rolloverRequest = new RolloverRequest( + IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES), + null ); // We are executing a lazy rollover because it is an action specialised for this situation, when we want an // unconditional and performant rollover. diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 65264faf50129..2a6a789d9d312 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -25,7 +25,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -425,11 +425,7 @@ private void rollOverDataStreams( RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null); rolloverRequest.masterNodeTimeout(bulkRequest.timeout); if (targetFailureStore) { - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) - .build() - ); + rolloverRequest.setRolloverTarget(IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES)); } // We are executing a lazy rollover because it is an action specialised for this situation, when we want an // unconditional and performant rollover. @@ -438,9 +434,8 @@ private void rollOverDataStreams( @Override public void onResponse(RolloverResponse result) { logger.debug( - "Data stream{} {} has {} over, the latest index is {}", - rolloverRequest.targetsFailureStore() ? " failure store" : "", - dataStream, + "Data stream [{}] has {} over, the latest index is {}", + rolloverRequest.getRolloverTarget(), result.isRolledOver() ? 
"been successfully rolled" : "skipped rolling", result.getNewIndex() ); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java index a0a05138406c5..62caba8f7ed96 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java @@ -9,16 +9,18 @@ package org.elasticsearch.action.datastreams; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.index.Index; +import java.util.ArrayList; import java.util.List; import java.util.SortedMap; -import java.util.stream.Stream; public class DataStreamsActionUtil { @@ -47,25 +49,79 @@ public static IndicesOptions updateIndicesOptions(IndicesOptions indicesOptions) return indicesOptions; } - public static Stream resolveConcreteIndexNames( + public static List resolveConcreteIndexNames( IndexNameExpressionResolver indexNameExpressionResolver, ClusterState clusterState, String[] names, IndicesOptions indicesOptions ) { - List abstractionNames = getDataStreamNames(indexNameExpressionResolver, clusterState, names, indicesOptions); + List abstractionNames = indexNameExpressionResolver.dataStreams( + clusterState, + updateIndicesOptions(indicesOptions), + names + ); SortedMap indicesLookup = clusterState.getMetadata().getIndicesLookup(); - return abstractionNames.stream().flatMap(abstractionName -> { + List results = new ArrayList<>(abstractionNames.size()); + for (ResolvedExpression abstractionName : abstractionNames) { + IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName.resource()); + assert indexAbstraction != null; + if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) { + selectDataStreamIndicesNames( + (DataStream) indexAbstraction, + IndexComponentSelector.FAILURES.equals(abstractionName.selector()), + results + ); + } + } + return results; + } + + /** + * Resolves a list of expressions into data stream names and then collects the concrete indices + * that are applicable for those data streams based on the selector provided in the arguments. 
+ * @param indexNameExpressionResolver resolver object + * @param clusterState state to query + * @param names data stream expressions + * @param selector which component indices of the data stream should be returned + * @param indicesOptions options for expression resolution + * @return A stream of concrete index names that belong to the components specified + * on the data streams returned from the expressions given + */ + public static List resolveConcreteIndexNamesWithSelector( + IndexNameExpressionResolver indexNameExpressionResolver, + ClusterState clusterState, + String[] names, + IndexComponentSelector selector, + IndicesOptions indicesOptions + ) { + assert indicesOptions.allowSelectors() == false : "If selectors are enabled, use resolveConcreteIndexNames instead"; + List abstractionNames = indexNameExpressionResolver.dataStreamNames( + clusterState, + updateIndicesOptions(indicesOptions), + names + ); + SortedMap indicesLookup = clusterState.getMetadata().getIndicesLookup(); + + List results = new ArrayList<>(abstractionNames.size()); + for (String abstractionName : abstractionNames) { IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName); assert indexAbstraction != null; if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) { - DataStream dataStream = (DataStream) indexAbstraction; - List indices = dataStream.getIndices(); - return indices.stream().map(Index::getName); - } else { - return Stream.empty(); + if (selector.shouldIncludeData()) { + selectDataStreamIndicesNames((DataStream) indexAbstraction, false, results); + } + if (selector.shouldIncludeFailures()) { + selectDataStreamIndicesNames((DataStream) indexAbstraction, true, results); + } } - }); + } + return results; + } + + private static void selectDataStreamIndicesNames(DataStream indexAbstraction, boolean failureStore, List accumulator) { + for (Index index : indexAbstraction.getDataStreamIndices(failureStore).getIndices()) { + accumulator.add(index.getName()); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java index 9266bae439b73..82afeec752378 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java @@ -38,8 +38,6 @@ public DataStreamsStatsAction() { public static class Request extends BroadcastRequest { public Request() { - // this doesn't really matter since data stream name resolution isn't affected by IndicesOptions and - // a data stream's backing indices are retrieved from its metadata super( null, IndicesOptions.builder() @@ -58,10 +56,9 @@ public Request() { .allowAliasToMultipleIndices(true) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) - .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build() ); } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java index 4f647d4f02884..640c88918ffc0 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java @@ -61,7 +61,7 @@ public static class Request extends MasterNodeRequest implements Indice 
.allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 883fc543749c2..c55957787aee7 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -72,10 +72,11 @@ public static class Request extends MasterNodeReadRequest implements In .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) .build(); + private boolean includeDefaults = false; private boolean verbose = false; diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java index a43d29501a7ee..401bd7a27c6fa 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java @@ -63,7 +63,7 @@ public static class Request extends MasterNodeReadRequest implements In .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java index b054d12890366..c2b7de8d5df8b 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java @@ -94,7 +94,7 @@ public static Request parseRequest(XContentParser parser, Factory factory) { .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(false) + .allowSelectors(false) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java index 62771230636c1..cce01aca7685a 100644 --- a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java @@ -82,7 +82,7 @@ public String[] indices() { @Override public IndicesOptions indicesOptions() { - return IndicesOptions.STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED; + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java index ba1afaf4678fb..7890a0f9f9738 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java @@ -21,7 +21,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.shard.ShardId; -import 
org.elasticsearch.search.Scroll; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.builder.SubSearchSourceBuilder; @@ -49,7 +48,8 @@ public class CanMatchNodeRequest extends TransportRequest implements IndicesRequ private final SearchType searchType; private final Boolean requestCache; private final boolean allowPartialSearchResults; - private final Scroll scroll; + @Nullable + private final TimeValue scroll; private final int numberOfShards; private final long nowInMillis; @Nullable @@ -195,7 +195,7 @@ public CanMatchNodeRequest(StreamInput in) throws IOException { ); } } - scroll = in.readOptionalWriteable(Scroll::new); + scroll = in.readOptionalTimeValue(); requestCache = in.readOptionalBoolean(); allowPartialSearchResults = in.readBoolean(); numberOfShards = in.readVInt(); @@ -216,7 +216,7 @@ public void writeTo(StreamOutput out) throws IOException { // types not supported so send an empty array to previous versions out.writeStringArray(Strings.EMPTY_ARRAY); } - out.writeOptionalWriteable(scroll); + out.writeOptionalTimeValue(scroll); out.writeOptionalBoolean(requestCache); out.writeBoolean(allowPartialSearchResults); out.writeVInt(numberOfShards); diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 0fbface3793a8..8568b60916761 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -271,7 +271,7 @@ private void moveToNextPhase( ) { context.executeNextPhase(this, () -> { var resp = SearchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr); - context.addReleasable(resp::decRef); + context.addReleasable(resp); return nextPhaseFactory.apply(resp, searchPhaseShardResults); }); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 2e1d58e042f09..8b77ec7fb5463 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -24,7 +24,6 @@ import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.Rewriteable; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.SearchContext; @@ -82,7 +81,7 @@ public class SearchRequest extends ActionRequest implements IndicesRequest.Repla private Boolean allowPartialSearchResults; - private Scroll scroll; + private TimeValue scrollKeepAlive; private int batchedReduceSize = DEFAULT_BATCHED_REDUCE_SIZE; @@ -206,7 +205,7 @@ private SearchRequest( this.preFilterShardSize = searchRequest.preFilterShardSize; this.requestCache = searchRequest.requestCache; this.routing = searchRequest.routing; - this.scroll = searchRequest.scroll; + this.scrollKeepAlive = searchRequest.scrollKeepAlive; this.searchType = searchRequest.searchType; this.source = searchRequest.source; this.localClusterAlias = localClusterAlias; @@ -229,7 +228,7 @@ public SearchRequest(StreamInput in) throws IOException { indices = in.readStringArray(); routing = 
in.readOptionalString(); preference = in.readOptionalString(); - scroll = in.readOptionalWriteable(Scroll::new); + scrollKeepAlive = in.readOptionalTimeValue(); source = in.readOptionalWriteable(SearchSourceBuilder::new); if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types no longer relevant so ignore @@ -276,7 +275,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); out.writeOptionalString(routing); out.writeOptionalString(preference); - out.writeOptionalWriteable(scroll); + out.writeOptionalTimeValue(scrollKeepAlive); out.writeOptionalWriteable(source); if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types not supported so send an empty array to previous versions @@ -525,23 +524,16 @@ public String[] indices() { /** * If set, will enable scrolling of the search request. */ - public Scroll scroll() { - return scroll; - } - - /** - * If set, will enable scrolling of the search request. - */ - public SearchRequest scroll(Scroll scroll) { - this.scroll = scroll; - return this; + public TimeValue scroll() { + return scrollKeepAlive; } /** * If set, will enable scrolling of the search request for the specified timeout. */ public SearchRequest scroll(TimeValue keepAlive) { - return scroll(new Scroll(keepAlive)); + this.scrollKeepAlive = keepAlive; + return this; } /** @@ -681,7 +673,7 @@ public boolean hasKnnSearch() { } public int resolveTrackTotalHitsUpTo() { - return resolveTrackTotalHitsUpTo(scroll, source); + return resolveTrackTotalHitsUpTo(scrollKeepAlive, source); } /** @@ -731,7 +723,7 @@ public SearchRequest rewrite(QueryRewriteContext ctx) throws IOException { return hasChanged ? new SearchRequest(this).source(source) : this; } - public static int resolveTrackTotalHitsUpTo(Scroll scroll, SearchSourceBuilder source) { + public static int resolveTrackTotalHitsUpTo(TimeValue scroll, SearchSourceBuilder source) { if (scroll != null) { // no matter what the value of track_total_hits is return SearchContext.TRACK_TOTAL_HITS_ACCURATE; @@ -752,8 +744,8 @@ public final String buildDescription() { Strings.arrayToDelimitedString(indices, ",", sb); sb.append("]"); sb.append(", search_type[").append(searchType).append("]"); - if (scroll != null) { - sb.append(", scroll[").append(scroll.keepAlive()).append("]"); + if (scrollKeepAlive != null) { + sb.append(", scroll[").append(scrollKeepAlive).append("]"); } if (source != null) { sb.append(", source[").append(source.toString(FORMAT_PARAMS)).append("]"); @@ -784,7 +776,7 @@ public boolean equals(Object o) { && Objects.equals(preference, that.preference) && Objects.equals(source, that.source) && Objects.equals(requestCache, that.requestCache) - && Objects.equals(scroll, that.scroll) + && Objects.equals(scrollKeepAlive, that.scrollKeepAlive) && Objects.equals(batchedReduceSize, that.batchedReduceSize) && Objects.equals(maxConcurrentShardRequests, that.maxConcurrentShardRequests) && Objects.equals(preFilterShardSize, that.preFilterShardSize) @@ -805,7 +797,7 @@ public int hashCode() { preference, source, requestCache, - scroll, + scrollKeepAlive, indicesOptions, batchedReduceSize, maxConcurrentShardRequests, @@ -836,7 +828,7 @@ public String toString() { + ", requestCache=" + requestCache + ", scroll=" - + scroll + + scrollKeepAlive + ", maxConcurrentShardRequests=" + maxConcurrentShardRequests + ", batchedReduceSize=" diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java 
b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 2927c394da3d4..d309ef3a7498a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -16,7 +16,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.script.Script; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.builder.PointInTimeBuilder; @@ -62,14 +61,6 @@ public SearchRequestBuilder setSearchType(SearchType searchType) { return this; } - /** - * If set, will enable scrolling of the search request. - */ - public SearchRequestBuilder setScroll(Scroll scroll) { - request.scroll(scroll); - return this; - } - /** * If set, will enable scrolling of the search request for the specified timeout. */ diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 18edc83bf3dcc..6074a591a9762 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -343,8 +343,7 @@ public ShardSearchFailure[] getShardFailures() { } /** - * If scrolling was enabled ({@link SearchRequest#scroll(org.elasticsearch.search.Scroll)}, the - * scroll id that can be used to continue scrolling. + * If scrolling was enabled ({@link SearchRequest#scroll(TimeValue)}, the scroll id that can be used to continue scrolling. */ public String getScrollId() { return scrollId; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java index 8c9a42a61e33e..9d85348b80d62 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java @@ -9,14 +9,12 @@ package org.elasticsearch.action.search; -import org.elasticsearch.core.RefCounted; -import org.elasticsearch.core.SimpleRefCounted; +import org.elasticsearch.core.Releasable; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.suggest.Suggest; -import org.elasticsearch.transport.LeakTracker; import java.util.Collections; import java.util.Map; @@ -25,7 +23,7 @@ * Holds some sections that a search response is composed of (hits, aggs, suggestions etc.) during some steps of the search response * building. 
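Note on the hunks that follow: SearchResponseSections switches from RefCounted to Releasable, so callers no longer pair incRef()/decRef(); they either hand the whole object to addReleasable (as FetchSearchPhase now does) or scope it with try-with-resources (as SearchScrollAsyncAction now does). A minimal sketch of the pattern, assuming queryPhase and fetchResults are in scope as in SearchScrollAsyncAction#sendResponse:

```java
// Illustrative sketch only, not part of this patch.
// SearchResponseSections now implements Releasable, so its lifetime can be
// scoped with try-with-resources instead of manual reference counting.
try (SearchResponseSections sections = SearchPhaseController.merge(true, queryPhase, fetchResults)) {
    // read sections.hits(), sections.profile(), etc. while the block is open;
    // close() releases the hits (hits.decRef()) exactly once on exit
}
// no trailing finally { sections.decRef(); } is needed any more
```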
*/ -public class SearchResponseSections implements RefCounted { +public class SearchResponseSections implements Releasable { public static final SearchResponseSections EMPTY_WITH_TOTAL_HITS = new SearchResponseSections( SearchHits.EMPTY_WITH_TOTAL_HITS, @@ -53,8 +51,6 @@ public class SearchResponseSections implements RefCounted { protected final Boolean terminatedEarly; protected final int numReducePhases; - private final RefCounted refCounted; - public SearchResponseSections( SearchHits hits, InternalAggregations aggregations, @@ -72,7 +68,6 @@ public SearchResponseSections( this.timedOut = timedOut; this.terminatedEarly = terminatedEarly; this.numReducePhases = numReducePhases; - refCounted = hits.getHits().length > 0 ? LeakTracker.wrap(new SimpleRefCounted()) : ALWAYS_REFERENCED; } public final SearchHits hits() { @@ -97,26 +92,7 @@ public final Map profile() { } @Override - public void incRef() { - refCounted.incRef(); - } - - @Override - public boolean tryIncRef() { - return refCounted.tryIncRef(); - } - - @Override - public boolean decRef() { - if (refCounted.decRef()) { - hits.decRef(); - return true; - } - return false; - } - - @Override - public boolean hasReferences() { - return refCounted.hasReferences(); + public void close() { + hits.decRef(); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java index 60e96a8cce8ab..2231f791384fa 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java @@ -246,8 +246,7 @@ protected final void sendResponse( if (request.scroll() != null) { scrollId = request.scrollId(); } - var sections = SearchPhaseController.merge(true, queryPhase, fetchResults); - try { + try (var sections = SearchPhaseController.merge(true, queryPhase, fetchResults)) { ActionListener.respondAndRelease( listener, new SearchResponse( @@ -262,8 +261,6 @@ protected final void sendResponse( null ) ); - } finally { - sections.decRef(); } } catch (Exception e) { listener.onFailure(new ReduceSearchPhaseException("fetch", "inner finish failed", e, buildShardFailures())); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java index 71b88b03a5463..1d34288665ae4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.search.Scroll; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xcontent.ToXContentObject; @@ -30,7 +29,7 @@ public class SearchScrollRequest extends ActionRequest implements ToXContentObject { private String scrollId; - private Scroll scroll; + private TimeValue scroll; public SearchScrollRequest() {} @@ -41,14 +40,14 @@ public SearchScrollRequest(String scrollId) { public SearchScrollRequest(StreamInput in) throws IOException { super(in); scrollId = in.readString(); - scroll = in.readOptionalWriteable(Scroll::new); + scroll = in.readOptionalTimeValue(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); 
out.writeString(scrollId); - out.writeOptionalWriteable(scroll); + out.writeOptionalTimeValue(scroll); } @Override @@ -79,23 +78,16 @@ public ParsedScrollId parseScrollId() { /** * If set, will enable scrolling of the search request. */ - public Scroll scroll() { + public TimeValue scroll() { return scroll; } - /** - * If set, will enable scrolling of the search request. - */ - public SearchScrollRequest scroll(Scroll scroll) { - this.scroll = scroll; - return this; - } - /** * If set, will enable scrolling of the search request for the specified timeout. */ public SearchScrollRequest scroll(TimeValue keepAlive) { - return scroll(new Scroll(keepAlive)); + this.scroll = keepAlive; + return this; } @Override @@ -135,7 +127,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field("scroll_id", scrollId); if (scroll != null) { - builder.field("scroll", scroll.keepAlive().getStringRep()); + builder.field("scroll", scroll.getStringRep()); } builder.endObject(); return builder; @@ -157,7 +149,7 @@ public void fromXContent(XContentParser parser) throws IOException { } else if ("scroll_id".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { scrollId(parser.text()); } else if ("scroll".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { - scroll(new Scroll(TimeValue.parseTimeValue(parser.text(), null, "scroll"))); + scroll(TimeValue.parseTimeValue(parser.text(), null, "scroll")); } else { throw new IllegalArgumentException( "Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] " diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java index 24dac98166ce0..57a6e44f4c2b0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.search.Scroll; /** * A search scroll action request builder. @@ -35,14 +34,6 @@ public SearchScrollRequestBuilder setScrollId(String scrollId) { return this; } - /** - * If set, will enable scrolling of the search request. - */ - public SearchScrollRequestBuilder setScroll(Scroll scroll) { - request.scroll(scroll); - return this; - } - /** * If set, will enable scrolling of the search request for the specified timeout. 
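Across SearchRequest, SearchScrollRequest and their builders, the Scroll wrapper is gone and the keep-alive travels as a plain TimeValue (serialized via readOptionalTimeValue/writeOptionalTimeValue, rendered with getStringRep). A minimal usage sketch, not part of the patch; the index name and scroll id are placeholders:

```java
// Sketch: the keep-alive is now passed directly as a TimeValue.
static void scrollRequests() {
    SearchRequest search = new SearchRequest("my-index")               // placeholder index
        .scroll(TimeValue.timeValueMinutes(1));                        // was: .scroll(new Scroll(...))

    SearchScrollRequest next = new SearchScrollRequest("<scroll-id>")  // placeholder scroll id
        .scroll(TimeValue.timeValueMinutes(1));                        // same keep-alive for the follow-up
}
```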
*/ diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index ebbd47336e3da..4231d598b2d70 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -13,7 +13,6 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.core.Nullable; @@ -47,37 +46,13 @@ * @param gatekeeperOptions, applies to all the resolved indices and defines if throttled will be included and if certain type of * aliases or indices are allowed, or they will throw an error. It acts as a gatekeeper when an action * does not support certain options. - * @param selectorOptions, applies to all resolved expressions, and it specifies the index component that should be included, if there - * is no index component defined on the expression level. */ public record IndicesOptions( ConcreteTargetOptions concreteTargetOptions, WildcardOptions wildcardOptions, - GatekeeperOptions gatekeeperOptions, - SelectorOptions selectorOptions + GatekeeperOptions gatekeeperOptions ) implements ToXContentFragment { - /** - * @deprecated this query param will be replaced by the selector `::` on the expression level - */ - @Deprecated - public static final String FAILURE_STORE_QUERY_PARAM = "failure_store"; - /** - * @deprecated this value will be replaced by the selector `::*` on the expression level - */ - @Deprecated - public static final String INCLUDE_ALL = "include"; - /** - * @deprecated this value will be replaced by the selector `::data` on the expression level - */ - @Deprecated - public static final String INCLUDE_ONLY_REGULAR_INDICES = "exclude"; - /** - * @deprecated this value will be replaced by the selector `::failures` on the expression level - */ - @Deprecated - public static final String INCLUDE_ONLY_FAILURE_INDICES = "only"; - public static IndicesOptions.Builder builder() { return new Builder(); } @@ -324,14 +299,14 @@ public static Builder builder(WildcardOptions wildcardOptions) { * - The ignoreThrottled flag, which is a deprecated flag that will filter out frozen indices. * @param allowAliasToMultipleIndices, allow aliases to multiple indices, true by default. * @param allowClosedIndices, allow closed indices, true by default. - * @param allowFailureIndices, allow failure indices in the response, true by default + * @param allowSelectors, allow selectors within index expressions, true by default. * @param ignoreThrottled, filters out throttled (aka frozen indices), defaults to true. This is deprecated and the only one * that only filters and never throws an error. 
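In the IndicesOptions changes below, GatekeeperOptions carries allowSelectors in place of allowFailureIndices, so whether `::` selectors are tolerated in index expressions is decided by the gatekeeper rather than by a separate SelectorOptions record. A sketch of building an IndicesOptions instance that forbids selectors, mirroring the builder calls used by the constants in this file (leaving wildcard and concrete-target options partly at their builder defaults is an assumption):

```java
// Sketch, not part of the patch: options that reject selector suffixes (e.g. "logs-*::failures").
IndicesOptions noSelectors = IndicesOptions.builder()
    .concreteTargetOptions(IndicesOptions.ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
    .gatekeeperOptions(
        IndicesOptions.GatekeeperOptions.builder()
            .allowAliasToMultipleIndices(true)
            .allowClosedIndices(true)
            .allowSelectors(false)   // replaces the removed allowFailureIndices(...) flag
            .ignoreThrottled(false)
    )
    .build();
```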
*/ public record GatekeeperOptions( boolean allowAliasToMultipleIndices, boolean allowClosedIndices, - boolean allowFailureIndices, + boolean allowSelectors, @Deprecated boolean ignoreThrottled ) implements ToXContentFragment { @@ -355,7 +330,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static class Builder { private boolean allowAliasToMultipleIndices; private boolean allowClosedIndices; - private boolean allowFailureIndices; + private boolean allowSelectors; private boolean ignoreThrottled; public Builder() { @@ -365,7 +340,7 @@ public Builder() { Builder(GatekeeperOptions options) { allowAliasToMultipleIndices = options.allowAliasToMultipleIndices; allowClosedIndices = options.allowClosedIndices; - allowFailureIndices = options.allowFailureIndices; + allowSelectors = options.allowSelectors; ignoreThrottled = options.ignoreThrottled; } @@ -388,11 +363,12 @@ public Builder allowClosedIndices(boolean allowClosedIndices) { } /** - * Failure indices are accepted when true, otherwise the resolution will throw an error. + * Selectors are allowed within index expressions when true, otherwise the resolution will treat their presence as a syntax + * error when resolving index expressions. * Defaults to true. */ - public Builder allowFailureIndices(boolean allowFailureIndices) { - this.allowFailureIndices = allowFailureIndices; + public Builder allowSelectors(boolean allowSelectors) { + this.allowSelectors = allowSelectors; return this; } @@ -405,7 +381,7 @@ public Builder ignoreThrottled(boolean ignoreThrottled) { } public GatekeeperOptions build() { - return new GatekeeperOptions(allowAliasToMultipleIndices, allowClosedIndices, allowFailureIndices, ignoreThrottled); + return new GatekeeperOptions(allowAliasToMultipleIndices, allowClosedIndices, allowSelectors, ignoreThrottled); } } @@ -418,50 +394,6 @@ public static Builder builder(GatekeeperOptions gatekeeperOptions) { } } - /** - * Defines which selectors should be used by default for an index operation in the event that no selectors are provided. - */ - public record SelectorOptions(IndexComponentSelector defaultSelector) implements Writeable { - - public static final SelectorOptions ALL_APPLICABLE = new SelectorOptions(IndexComponentSelector.ALL_APPLICABLE); - public static final SelectorOptions DATA = new SelectorOptions(IndexComponentSelector.DATA); - public static final SelectorOptions FAILURES = new SelectorOptions(IndexComponentSelector.FAILURES); - /** - * Default instance. Uses
{@code ::data}
as the default selector if none are present in an index expression. - */ - public static final SelectorOptions DEFAULT = DATA; - - public static SelectorOptions read(StreamInput in) throws IOException { - if (in.getTransportVersion().before(TransportVersions.INTRODUCE_ALL_APPLICABLE_SELECTOR)) { - EnumSet set = in.readEnumSet(IndexComponentSelector.class); - if (set.isEmpty() || set.size() == 2) { - assert set.contains(IndexComponentSelector.DATA) && set.contains(IndexComponentSelector.FAILURES) - : "The enum set only supported ::data and ::failures"; - return SelectorOptions.ALL_APPLICABLE; - } else if (set.contains(IndexComponentSelector.DATA)) { - return SelectorOptions.DATA; - } else { - return SelectorOptions.FAILURES; - } - } else { - return new SelectorOptions(IndexComponentSelector.read(in)); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().before(TransportVersions.INTRODUCE_ALL_APPLICABLE_SELECTOR)) { - switch (defaultSelector) { - case ALL_APPLICABLE -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES)); - case DATA -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.DATA)); - case FAILURES -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.FAILURES)); - } - } else { - defaultSelector.writeTo(out); - } - } - } - /** * This class is maintained for backwards compatibility and performance purposes. We use it for serialisation along with {@link Option}. */ @@ -497,7 +429,8 @@ private enum Option { ERROR_WHEN_CLOSED_INDICES, IGNORE_THROTTLED, - ALLOW_FAILURE_INDICES // Added in 8.14 + ALLOW_FAILURE_INDICES, // Added in 8.14, Removed in 8.18 + ALLOW_SELECTORS // Added in 8.18 } private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(IndicesOptions.class); @@ -510,8 +443,7 @@ private enum Option { public static final IndicesOptions DEFAULT = new IndicesOptions( ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, WildcardOptions.DEFAULT, - GatekeeperOptions.DEFAULT, - SelectorOptions.DEFAULT + GatekeeperOptions.DEFAULT ); public static final IndicesOptions STRICT_EXPAND_OPEN = IndicesOptions.builder() @@ -528,10 +460,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -547,10 +478,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -566,10 +496,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_NO_SELECTORS = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -585,7 +514,7 @@ private enum Option { GatekeeperOptions.builder() 
.allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(false) + .allowSelectors(false) .ignoreThrottled(false) ) .build(); @@ -603,10 +532,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -622,10 +550,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -636,10 +563,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTOR = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -650,7 +576,7 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(false) + .allowSelectors(false) .ignoreThrottled(false) ) .build(); @@ -668,10 +594,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -682,10 +607,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTORS = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -696,7 +620,7 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(false) + .allowSelectors(false) .ignoreThrottled(false) ) .build(); @@ -714,10 +638,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -728,10 +651,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions 
STRICT_EXPAND_OPEN_CLOSED_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -747,10 +669,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -766,10 +687,9 @@ private enum Option { GatekeeperOptions.builder() .allowClosedIndices(false) .allowAliasToMultipleIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -785,10 +705,9 @@ private enum Option { GatekeeperOptions.builder() .allowClosedIndices(false) .allowAliasToMultipleIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -804,10 +723,9 @@ private enum Option { GatekeeperOptions.builder() .ignoreThrottled(true) .allowClosedIndices(false) - .allowFailureIndices(true) + .allowSelectors(true) .allowAliasToMultipleIndices(true) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -823,10 +741,27 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(false) .allowClosedIndices(false) - .allowFailureIndices(true) + .allowSelectors(false) + .ignoreThrottled(false) + ) + .build(); + public static final IndicesOptions STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED_ALLOW_SELECTORS = IndicesOptions.builder() + .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) + .wildcardOptions( + WildcardOptions.builder() + .matchOpen(false) + .matchClosed(false) + .includeHidden(false) + .allowEmptyExpressions(true) + .resolveAliases(true) + ) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(false) + .allowClosedIndices(false) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -842,10 +777,9 @@ private enum Option { GatekeeperOptions.builder() .allowClosedIndices(false) .allowAliasToMultipleIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); /** @@ -903,10 +837,10 @@ public boolean forbidClosedIndices() { } /** - * @return Whether execution on failure indices is allowed. + * @return Whether selectors (::) are allowed in the index expression. 
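As the next hunk shows, allowSelectors() also consults the failure-store feature flag, so selectors are only honoured when DataStream.isFailureStoreFeatureFlagEnabled() returns true. A hypothetical caller-side check; the helper and the exception message are illustrative and not taken from this patch:

```java
// Hypothetical sketch: reject a selector suffix up front when the options forbid it.
static void ensureSelectorAllowed(String expression, IndicesOptions options) {
    boolean hasSelectorSuffix = expression.contains("::");   // e.g. "logs-*::failures"
    if (hasSelectorSuffix && options.allowSelectors() == false) {
        throw new IllegalArgumentException(
            "index expression [" + expression + "] must not contain a selector (::) in this context"
        );
    }
}
```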
*/ - public boolean allowFailureIndices() { - return gatekeeperOptions.allowFailureIndices(); + public boolean allowSelectors() { + return DataStream.isFailureStoreFeatureFlagEnabled() && gatekeeperOptions.allowSelectors(); } /** @@ -930,20 +864,6 @@ public boolean ignoreThrottled() { return gatekeeperOptions().ignoreThrottled(); } - /** - * @return whether regular indices (stand-alone or backing indices) will be included in the response - */ - public boolean includeRegularIndices() { - return selectorOptions().defaultSelector().shouldIncludeData(); - } - - /** - * @return whether failure indices (only supported by certain data streams) will be included in the response - */ - public boolean includeFailureIndices() { - return selectorOptions().defaultSelector().shouldIncludeFailures(); - } - public void writeIndicesOptions(StreamOutput out) throws IOException { EnumSet