From 5c35bff1c33d96227d7497af4b87c87b173bc167 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 11 Sep 2017 15:38:30 -0700 Subject: [PATCH 01/67] Test: Remove leftover static bwc test case (#26584) This test case was leftover from the static bwc tests. There was still one use for checking we do not load old indices, but this PR moves the legacy code needed for that directly into the test. I also opened a follow up issue to completely remove the unsupported test: #26583. --- .../RecoveryWithUnsupportedIndicesIT.java | 61 ++++++++++++++++++- .../StaticIndexBackwardCompatibilityIT.java | 55 ----------------- .../elasticsearch/test/ESIntegTestCase.java | 44 ------------- 3 files changed, 60 insertions(+), 100 deletions(-) delete mode 100644 core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java index 429266c45892c..50f328db39306 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java @@ -18,11 +18,70 @@ */ package org.elasticsearch.bwcompat; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.test.ESIntegTestCase; import static org.hamcrest.Matchers.containsString; -public class RecoveryWithUnsupportedIndicesIT extends StaticIndexBackwardCompatibilityIT { +@LuceneTestCase.SuppressCodecs("*") 
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 0, maxNumDataNodes = 0) +public class RecoveryWithUnsupportedIndicesIT extends ESIntegTestCase { + + /** + * Return settings that could be used to start a node that has the given zipped home directory. + */ + protected Settings prepareBackwardsDataDir(Path backwardsIndex, Object... settings) throws IOException { + Path indexDir = createTempDir(); + Path dataDir = indexDir.resolve("data"); + try (InputStream stream = Files.newInputStream(backwardsIndex)) { + TestUtil.unzip(stream, indexDir); + } + assertTrue(Files.exists(dataDir)); + + // list clusters in the datapath, ignoring anything from extrasfs + final Path[] list; + try (DirectoryStream stream = Files.newDirectoryStream(dataDir)) { + List dirs = new ArrayList<>(); + for (Path p : stream) { + if (!p.getFileName().toString().startsWith("extra")) { + dirs.add(p); + } + } + list = dirs.toArray(new Path[0]); + } + + if (list.length != 1) { + StringBuilder builder = new StringBuilder("Backwards index must contain exactly one cluster\n"); + for (Path line : list) { + builder.append(line.toString()).append('\n'); + } + throw new IllegalStateException(builder.toString()); + } + Path src = list[0].resolve(NodeEnvironment.NODES_FOLDER); + Path dest = dataDir.resolve(NodeEnvironment.NODES_FOLDER); + assertTrue(Files.exists(src)); + Files.move(src, dest); + assertFalse(Files.exists(src)); + assertTrue(Files.exists(dest)); + Settings.Builder builder = Settings.builder() + .put(settings) + .put(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath()); + + return builder.build(); + } + public void testUpgradeStartClusterOn_0_20_6() throws Exception { String indexName = "unsupported-0.20.6"; diff --git a/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java deleted file mode 100644 index 
3884d3475e12a..0000000000000 --- a/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.bwcompat; - -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESIntegTestCase; - -import static org.hamcrest.Matchers.greaterThanOrEqualTo; - -/** - * These tests are against static indexes, built from versions of ES that cannot be upgraded without - * a full cluster restart (ie no wire format compatibility). - */ -@LuceneTestCase.SuppressCodecs("*") -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 0, maxNumDataNodes = 0) -public class StaticIndexBackwardCompatibilityIT extends ESIntegTestCase { - - public void loadIndex(String index, Object... 
settings) throws Exception { - logger.info("Checking static index {}", index); - Settings nodeSettings = prepareBackwardsDataDir(getDataPath(index + ".zip"), settings); - internalCluster().startNode(nodeSettings); - ensureGreen(index); - assertIndexSanity(index); - } - - private void assertIndexSanity(String index) { - GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().get(); - assertEquals(1, getIndexResponse.indices().length); - assertEquals(index, getIndexResponse.indices()[0]); - ensureYellow(index); - SearchResponse test = client().prepareSearch(index).get(); - assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1L)); - } - -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 03853f61c8834..2753e4013c181 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.TestUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -141,7 +140,6 @@ import org.junit.BeforeClass; import java.io.IOException; -import java.io.InputStream; import java.lang.annotation.Annotation; import java.lang.annotation.ElementType; import java.lang.annotation.Inherited; @@ -151,7 +149,6 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.URL; -import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; @@ -2113,48 +2110,7 @@ protected String routingKeyForShard(String index, int shard) { return internalCluster().routingKeyForShard(resolveIndex(index), shard, random()); } - /** - * 
Return settings that could be used to start a node that has the given zipped home directory. - */ - protected Settings prepareBackwardsDataDir(Path backwardsIndex, Object... settings) throws IOException { - Path indexDir = createTempDir(); - Path dataDir = indexDir.resolve("data"); - try (InputStream stream = Files.newInputStream(backwardsIndex)) { - TestUtil.unzip(stream, indexDir); - } - assertTrue(Files.exists(dataDir)); - - // list clusters in the datapath, ignoring anything from extrasfs - final Path[] list; - try (DirectoryStream stream = Files.newDirectoryStream(dataDir)) { - List dirs = new ArrayList<>(); - for (Path p : stream) { - if (!p.getFileName().toString().startsWith("extra")) { - dirs.add(p); - } - } - list = dirs.toArray(new Path[0]); - } - - if (list.length != 1) { - StringBuilder builder = new StringBuilder("Backwards index must contain exactly one cluster\n"); - for (Path line : list) { - builder.append(line.toString()).append('\n'); - } - throw new IllegalStateException(builder.toString()); - } - Path src = list[0].resolve(NodeEnvironment.NODES_FOLDER); - Path dest = dataDir.resolve(NodeEnvironment.NODES_FOLDER); - assertTrue(Files.exists(src)); - Files.move(src, dest); - assertFalse(Files.exists(src)); - assertTrue(Files.exists(dest)); - Settings.Builder builder = Settings.builder() - .put(settings) - .put(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath()); - return builder.build(); - } @Override protected NamedXContentRegistry xContentRegistry() { From 8ba4ff3be03552ed769718a21be51db13e390818 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 11 Sep 2017 15:43:34 -0700 Subject: [PATCH 02/67] Build: Move javadoc linking to root build.gradle (#26529) Javadoc linking between projects currently relies on projectSubstitutions. However, that is an extension variable that is not part of BuildPlugin. This commit moves the javadoc linking into the root build.gradle, alongside where projectSubstitutions are defined. 
--- build.gradle | 49 ++++++++++++------- .../elasticsearch/gradle/BuildPlugin.groovy | 24 +-------- 2 files changed, 32 insertions(+), 41 deletions(-) diff --git a/build.gradle b/build.gradle index 6453db4c0fb88..cfc8401a934e0 100644 --- a/build.gradle +++ b/build.gradle @@ -204,25 +204,15 @@ task branchConsistency { } subprojects { - project.afterEvaluate { - // ignore missing javadocs - tasks.withType(Javadoc) { Javadoc javadoc -> - // the -quiet here is because of a bug in gradle, in that adding a string option - // by itself is not added to the options. By adding quiet, both this option and - // the "value" -quiet is added, separated by a space. This is ok since the javadoc - // command already adds -quiet, so we are just duplicating it - // see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959 - javadoc.options.encoding='UTF8' - javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet') - /* - TODO: building javadocs with java 9 b118 is currently broken with weird errors, so - for now this is commented out...try again with the next ea build... - javadoc.executable = new File(project.javaHome, 'bin/javadoc') - if (project.javaVersion == JavaVersion.VERSION_1_9) { - // TODO: remove this hack! gradle should be passing this... - javadoc.options.addStringOption('source', '8') - }*/ - } + // ignore missing javadocs + tasks.withType(Javadoc) { Javadoc javadoc -> + // the -quiet here is because of a bug in gradle, in that adding a string option + // by itself is not added to the options. By adding quiet, both this option and + // the "value" -quiet is added, separated by a space. 
This is ok since the javadoc + // command already adds -quiet, so we are just duplicating it + // see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959 + javadoc.options.encoding='UTF8' + javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet') } /* Sets up the dependencies that we build as part of this project but @@ -280,6 +270,27 @@ subprojects { } } } + + // Handle javadoc dependencies across projects. Order matters: the linksOffline for + // org.elasticsearch:elasticsearch must be the last one or all the links for the + // other packages (e.g org.elasticsearch.client) will point to core rather than + // their own artifacts. + if (project.plugins.hasPlugin(BuildPlugin)) { + String artifactsHost = VersionProperties.elasticsearch.endsWith("-SNAPSHOT") ? "https://snapshots.elastic.co" : "https://artifacts.elastic.co" + Closure sortClosure = { a, b -> b.group <=> a.group } + Closure depJavadocClosure = { dep -> + if (dep.group != null && dep.group.startsWith('org.elasticsearch')) { + String substitution = project.ext.projectSubstitutions.get("${dep.group}:${dep.name}:${dep.version}") + if (substitution != null) { + project.javadoc.dependsOn substitution + ':javadoc' + String artifactPath = dep.group.replaceAll('\\.', '/') + '/' + dep.name.replaceAll('\\.', '/') + '/' + dep.version + project.javadoc.options.linksOffline artifactsHost + "/javadoc/" + artifactPath, "${project.project(substitution).buildDir}/docs/javadoc/" + } + } + } + project.configurations.compile.dependencies.findAll().toSorted(sortClosure).each(depJavadocClosure) + project.configurations.provided.dependencies.findAll().toSorted(sortClosure).each(depJavadocClosure) + } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index c86b2b6cb79ad..e836bd2fa2675 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ 
b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -455,28 +455,8 @@ class BuildPlugin implements Plugin { } static void configureJavadoc(Project project) { - String artifactsHost = VersionProperties.elasticsearch.endsWith("-SNAPSHOT") ? "https://snapshots.elastic.co" : "https://artifacts.elastic.co" - project.afterEvaluate { - project.tasks.withType(Javadoc) { - executable = new File(project.javaHome, 'bin/javadoc') - } - /* - * Order matters, the linksOffline for org.elasticsearch:elasticsearch must be the last one - * or all the links for the other packages (e.g org.elasticsearch.client) will point to core rather than their own artifacts - */ - Closure sortClosure = { a, b -> b.group <=> a.group } - Closure depJavadocClosure = { dep -> - if (dep.group != null && dep.group.startsWith('org.elasticsearch')) { - String substitution = project.ext.projectSubstitutions.get("${dep.group}:${dep.name}:${dep.version}") - if (substitution != null) { - project.javadoc.dependsOn substitution + ':javadoc' - String artifactPath = dep.group.replaceAll('\\.', '/') + '/' + dep.name.replaceAll('\\.', '/') + '/' + dep.version - project.javadoc.options.linksOffline artifactsHost + "/javadoc/" + artifactPath, "${project.project(substitution).buildDir}/docs/javadoc/" - } - } - } - project.configurations.compile.dependencies.findAll().toSorted(sortClosure).each(depJavadocClosure) - project.configurations.provided.dependencies.findAll().toSorted(sortClosure).each(depJavadocClosure) + project.tasks.withType(Javadoc) { + executable = new File(project.javaHome, 'bin/javadoc') } configureJavadocJar(project) } From 9834081254ef8f81b492444089423020116a9076 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 11 Sep 2017 15:44:23 -0700 Subject: [PATCH 03/67] Fix reference to painless inside expression engine (#26528) This was a simple copy/paste bug in an earlier refactoring. 
--- .../elasticsearch/script/expression/ExpressionScriptEngine.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java index 8f3df14659a95..17dc2740ee426 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java @@ -108,7 +108,7 @@ protected Class loadClass(String name, boolean resolve) throws ClassNotFoundE ExecutableScript.Factory factory = (p) -> new ExpressionExecutableScript(expr, p); return context.factoryClazz.cast(factory); } - throw new IllegalArgumentException("painless does not know how to handle context [" + context.name + "]"); + throw new IllegalArgumentException("expression engine does not know how to handle script context [" + context.name + "]"); } private SearchScript.LeafFactory newSearchScript(Expression expr, SearchLookup lookup, @Nullable Map vars) { From 95d40758c2984b2396164bfb5712331b588c6118 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 11 Sep 2017 15:55:02 -0700 Subject: [PATCH 04/67] Build: Remove norelease from forbidden patterns (#26592) closes #26547 --- .../gradle/precommit/ForbiddenPatternsTask.groovy | 5 ----- 1 file changed, 5 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy index ed62e88c567fa..e574d67f2ace1 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy @@ -63,11 +63,6 @@ public class ForbiddenPatternsTask extends DefaultTask { 
patterns.put('nocommit should be all lowercase or all uppercase', /((?i)nocommit)(? Date: Tue, 12 Sep 2017 01:09:27 +0200 Subject: [PATCH 05/67] Azure repository: Accelerate the listing of files (used in delete snapshot) (#25710) This commit reworks the azure listing of snapshot files to do a single listing, instead of once per blob. closes #25424 --- .../storage/AzureStorageServiceImpl.java | 17 ++-- .../azure/AzureSnapshotRestoreTests.java | 77 ++++++++++++------- 2 files changed, 55 insertions(+), 39 deletions(-) diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java index c928d79c0c242..8268cba7f3e7f 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java @@ -24,6 +24,7 @@ import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.RetryPolicy; import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.BlobListingDetails; import com.microsoft.azure.storage.blob.BlobProperties; import com.microsoft.azure.storage.blob.CloudBlobClient; import com.microsoft.azure.storage.blob.CloudBlobContainer; @@ -45,6 +46,7 @@ import java.io.OutputStream; import java.net.URI; import java.net.URISyntaxException; +import java.util.EnumSet; import java.util.HashMap; import java.util.Map; @@ -291,33 +293,26 @@ public Map listBlobsByPrefix(String account, LocationMode logger.debug("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix); MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); + EnumSet enumBlobListingDetails = EnumSet.of(BlobListingDetails.METADATA); CloudBlobClient client = this.getSelectedClient(account, mode); CloudBlobContainer 
blobContainer = client.getContainerReference(container); - SocketAccess.doPrivilegedVoidException(() -> { if (blobContainer.exists()) { - for (ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? "" : prefix))) { + for (ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? "" : prefix), false, + enumBlobListingDetails, null, null)) { URI uri = blobItem.getUri(); logger.trace("blob url [{}]", uri); // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ // this requires 1 + container.length() + 1, with each 1 corresponding to one of the / String blobPath = uri.getPath().substring(1 + container.length() + 1); - - CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobPath); - - // fetch the blob attributes from Azure (getBlockBlobReference does not do this) - // this is needed to retrieve the blob length (among other metadata) from Azure Storage - blob.downloadAttributes(); - - BlobProperties properties = blob.getProperties(); + BlobProperties properties = ((CloudBlockBlob) blobItem).getProperties(); String name = blobPath.substring(keyPath.length()); logger.trace("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength()); blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getLength())); } } }); - return blobsBuilder.immutableMap(); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java index aea47f38ef3ef..7eb808e7c956e 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java @@ -36,18 +36,24 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.plugin.repository.azure.AzureRepositoryPlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.repositories.azure.AzureRepository.Repository; import org.elasticsearch.snapshots.SnapshotMissingException; +import org.elasticsearch.snapshots.SnapshotRestoreException; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.store.MockFSDirectoryService; +import org.elasticsearch.test.store.MockFSIndexStore; import org.junit.After; import org.junit.Before; import java.net.URISyntaxException; +import java.util.Arrays; +import java.util.Collection; import java.util.Locale; import java.util.concurrent.TimeUnit; @@ -65,13 +71,24 @@ supportsDedicatedMasters = false, numDataNodes = 1, transportClientRatio = 0.0) public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(AzureRepositoryPlugin.class, MockFSIndexStore.TestPlugin.class); + } + private String getRepositoryPath() { String testName = "it-" + getTestName(); return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName; } public static String getContainerName() { - String testName = "snapshot-itest-".concat(RandomizedTest.getContext().getRunnerSeedAsString().toLowerCase(Locale.ROOT)); + /* Have a different name per test so that there is no possible race condition. 
As the long can be negative, + * there mustn't be a hyphen between the 2 concatenated numbers + * (can't have 2 consecutives hyphens on Azure containers) + */ + String testName = "snapshot-itest-" + .concat(RandomizedTest.getContext().getRunnerSeedAsString().toLowerCase(Locale.ROOT)); return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName; } @@ -95,9 +112,10 @@ public final void wipeAzureRepositories() throws StorageException, URISyntaxExce } public void testSimpleWorkflow() { + String repo_name = "test-repo-simple"; Client client = client(); logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); - PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") + PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repo_name) .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) @@ -120,13 +138,13 @@ public void testSimpleWorkflow() { assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repo_name, "test-snap") .setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots() + 
assertThat(client.admin().cluster().prepareGetSnapshots(repo_name).setSnapshots("test-snap").get().getSnapshots() .get(0).state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> delete some data"); @@ -148,7 +166,7 @@ public void testSimpleWorkflow() { client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get(); logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot(repo_name, "test-snap") .setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -161,7 +179,7 @@ public void testSimpleWorkflow() { logger.info("--> delete indices"); cluster().wipeIndices("test-idx-1", "test-idx-2"); logger.info("--> restore one index after deletion"); - restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true) + restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot(repo_name, "test-snap").setWaitForCompletion(true) .setIndices("test-idx-*", "-test-idx-2").get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -177,7 +195,7 @@ public void testSimpleWorkflow() { public void testMultipleSnapshots() throws URISyntaxException, StorageException { final String indexName = "test-idx-1"; final String typeName = "doc"; - final String repositoryName = "test-repo"; + final String repositoryName = "test-repo-multiple-snapshot"; final String snapshot1Name = "test-snap-1"; final String snapshot2Name = "test-snap-2"; @@ -314,6 +332,7 @@ public void testMultipleRepositories() { * For issue #26: https://github.com/elastic/elasticsearch-cloud-azure/issues/26 */ public void testListBlobs_26() throws StorageException, URISyntaxException { + final String 
repositoryName="test-repo-26"; createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); @@ -327,29 +346,29 @@ public void testListBlobs_26() throws StorageException, URISyntaxException { ClusterAdminClient client = client().admin().cluster(); logger.info("--> creating azure repository without any path"); - PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure") + PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") .setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); // Get all snapshots - should be empty - assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(0)); + assertThat(client.prepareGetSnapshots(repositoryName).get().getSnapshots().size(), equalTo(0)); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.prepareCreateSnapshot("test-repo", "test-snap-26") + CreateSnapshotResponse createSnapshotResponse = client.prepareCreateSnapshot(repositoryName, "test-snap-26") .setWaitForCompletion(true).setIndices("test-idx-*").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); // Get all snapshots - should have one - assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(1)); + assertThat(client.prepareGetSnapshots(repositoryName).get().getSnapshots().size(), equalTo(1)); // Clean the snapshot - client.prepareDeleteSnapshot("test-repo", "test-snap-26").get(); - client.prepareDeleteRepository("test-repo").get(); + client.prepareDeleteSnapshot(repositoryName, "test-snap-26").get(); + client.prepareDeleteRepository(repositoryName).get(); logger.info("--> creating azure repository path [{}]", getRepositoryPath()); - putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure") + 
putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") .setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) @@ -357,15 +376,15 @@ public void testListBlobs_26() throws StorageException, URISyntaxException { assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); // Get all snapshots - should be empty - assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(0)); + assertThat(client.prepareGetSnapshots(repositoryName).get().getSnapshots().size(), equalTo(0)); logger.info("--> snapshot"); - createSnapshotResponse = client.prepareCreateSnapshot("test-repo", "test-snap-26").setWaitForCompletion(true) + createSnapshotResponse = client.prepareCreateSnapshot(repositoryName, "test-snap-26").setWaitForCompletion(true) .setIndices("test-idx-*").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); // Get all snapshots - should have one - assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(1)); + assertThat(client.prepareGetSnapshots(repositoryName).get().getSnapshots().size(), equalTo(1)); } @@ -374,23 +393,24 @@ public void testListBlobs_26() throws StorageException, URISyntaxException { * For issue #28: https://github.com/elastic/elasticsearch-cloud-azure/issues/28 */ public void testGetDeleteNonExistingSnapshot_28() throws StorageException, URISyntaxException { + final String repositoryName="test-repo-28"; ClusterAdminClient client = client().admin().cluster(); logger.info("--> creating azure repository without any path"); - PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure") + PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") .setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), 
getContainerName()) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); try { - client.prepareGetSnapshots("test-repo").addSnapshots("nonexistingsnapshotname").get(); + client.prepareGetSnapshots(repositoryName).addSnapshots("nonexistingsnapshotname").get(); fail("Shouldn't be here"); } catch (SnapshotMissingException ex) { // Expected } try { - client.prepareDeleteSnapshot("test-repo", "nonexistingsnapshotname").get(); + client.prepareDeleteSnapshot(repositoryName, "nonexistingsnapshotname").get(); fail("Shouldn't be here"); } catch (SnapshotMissingException ex) { // Expected @@ -419,18 +439,19 @@ public void testForbiddenContainerName() throws Exception { * @param correct Is this container name correct */ private void checkContainerName(final String container, final boolean correct) throws Exception { + String repositoryName = "test-repo-checkContainerName"; logger.info("--> creating azure repository with container name [{}]", container); // It could happen that we just removed from a previous test the same container so // we can not create it yet. 
assertBusy(() -> { try { - PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") + PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository(repositoryName) .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), container) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) ).get(); - client().admin().cluster().prepareDeleteRepository("test-repo").get(); + client().admin().cluster().prepareDeleteRepository(repositoryName).get(); try { logger.info("--> remove container [{}]", container); cleanRepositoryFiles(container); @@ -451,9 +472,10 @@ private void checkContainerName(final String container, final boolean correct) t * Test case for issue #23: https://github.com/elastic/elasticsearch-cloud-azure/issues/23 */ public void testNonExistingRepo_23() { + final String repositoryName = "test-repo-test23"; Client client = client(); logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); - PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") + PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repositoryName) .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) @@ -463,9 +485,9 @@ public void testNonExistingRepo_23() { logger.info("--> restore non existing snapshot"); try { - client.admin().cluster().prepareRestoreSnapshot("test-repo", "no-existing-snapshot").setWaitForCompletion(true).get(); + client.admin().cluster().prepareRestoreSnapshot(repositoryName, "no-existing-snapshot").setWaitForCompletion(true).get(); fail("Shouldn't be here"); - } catch (SnapshotMissingException ex) { + } catch 
(SnapshotRestoreException ex) { // Expected } } @@ -475,9 +497,8 @@ public void testNonExistingRepo_23() { */ public void testRemoveAndCreateContainer() throws Exception { final String container = getContainerName().concat("-testremove"); - final AzureStorageService storageService = new AzureStorageServiceImpl(internalCluster().getDefaultSettings(), - AzureStorageSettings.load(internalCluster().getDefaultSettings())); - + final AzureStorageService storageService = new AzureStorageServiceImpl(nodeSettings(0),AzureStorageSettings.load(nodeSettings(0))); + // It could happen that we run this test really close to a previous one // so we might need some time to be able to create the container assertBusy(() -> { From 06150d40a24a28f074eb5750b46e4586047990e6 Mon Sep 17 00:00:00 2001 From: mohit Date: Tue, 12 Sep 2017 01:34:12 -0700 Subject: [PATCH 06/67] update AWS SDK for ECS Task IAM support in discovery-ec2 (#26479) This commit contains: * update AWS SDK for ECS Task IAM support * ignore dependencies not essential to `discovery-ec2`: * jmespath seems to be used for `waiters` * amazon ion is a protocol not used by EC2 or IAM --- plugins/discovery-ec2/build.gradle | 22 +- .../aws-java-sdk-core-1.10.69.jar.sha1 | 1 - .../aws-java-sdk-core-1.11.187.jar.sha1 | 1 + .../aws-java-sdk-ec2-1.10.69.jar.sha1 | 1 - .../aws-java-sdk-ec2-1.11.187.jar.sha1 | 1 + .../discovery/ec2/AmazonEC2Mock.java | 449 +++++++++++++++--- 6 files changed, 408 insertions(+), 67 deletions(-) delete mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-core-1.10.69.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.10.69.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 815e50f61c12c..f6941a9260e4b 100644 --- a/plugins/discovery-ec2/build.gradle +++ 
b/plugins/discovery-ec2/build.gradle @@ -23,7 +23,7 @@ esplugin { } versions << [ - 'aws': '1.10.69' + 'aws': '1.11.187' ] dependencies { @@ -55,6 +55,26 @@ test { thirdPartyAudit.excludes = [ // classes are missing + 'com.amazonaws.jmespath.JmesPathEvaluationVisitor', + 'com.amazonaws.jmespath.JmesPathExpression', + 'com.amazonaws.jmespath.JmesPathField', + 'com.amazonaws.jmespath.JmesPathFlatten', + 'com.amazonaws.jmespath.JmesPathIdentity', + 'com.amazonaws.jmespath.JmesPathLengthFunction', + 'com.amazonaws.jmespath.JmesPathLiteral', + 'com.amazonaws.jmespath.JmesPathProjection', + 'com.amazonaws.jmespath.JmesPathSubExpression', + 'com.amazonaws.jmespath.ObjectMapperSingleton', + 'com.amazonaws.jmespath.OpGreaterThan', + 'software.amazon.ion.IonReader', + 'software.amazon.ion.IonSystem', + 'software.amazon.ion.IonType', + 'software.amazon.ion.IonWriter', + 'software.amazon.ion.Timestamp', + 'software.amazon.ion.system.IonBinaryWriterBuilder', + 'software.amazon.ion.system.IonSystemBuilder', + 'software.amazon.ion.system.IonTextWriterBuilder', + 'software.amazon.ion.system.IonWriterBuilder', 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener', 'org.apache.avalon.framework.logger.Logger', diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.10.69.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.10.69.jar.sha1 deleted file mode 100644 index 2971a33d7d91b..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.10.69.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a1f02d5f26ba1d8c37e2bf9c847db3c6729dda00 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 new file mode 100644 index 0000000000000..a5293a9bf6580 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 @@ -0,0 +1 @@ +6f47fcd3c2917bef69dc36aba203c5ea4af9bf24 \ No newline at end of file diff --git 
a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.10.69.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.10.69.jar.sha1 deleted file mode 100644 index a1a493d3b8f84..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.10.69.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afbff1ece8365859eb4cfe0d3ba543d68b154d26 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 new file mode 100644 index 0000000000000..4602436e08182 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 @@ -0,0 +1 @@ +f3e5a8601f3105624674b1a12ca34f453a4b5895 \ No newline at end of file diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java index 050a25bb18dc3..34ad449d06e8d 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java @@ -32,14 +32,27 @@ import com.amazonaws.services.ec2.model.AllocateHostsRequest; import com.amazonaws.services.ec2.model.AllocateHostsResult; import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesRequest; +import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesResult; +import com.amazonaws.services.ec2.model.AssignIpv6AddressesRequest; +import com.amazonaws.services.ec2.model.AssignIpv6AddressesResult; import com.amazonaws.services.ec2.model.AssociateAddressRequest; import com.amazonaws.services.ec2.model.AssociateAddressResult; +import com.amazonaws.services.ec2.model.AssociateVpcCidrBlockRequest; +import com.amazonaws.services.ec2.model.AssociateVpcCidrBlockResult; +import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockRequest; +import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockResult; 
+import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileRequest; +import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileResult; +import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteRequest; +import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteResult; import com.amazonaws.services.ec2.model.AssociateDhcpOptionsRequest; +import com.amazonaws.services.ec2.model.AssociateDhcpOptionsResult; import com.amazonaws.services.ec2.model.AssociateRouteTableRequest; import com.amazonaws.services.ec2.model.AssociateRouteTableResult; import com.amazonaws.services.ec2.model.AttachClassicLinkVpcRequest; import com.amazonaws.services.ec2.model.AttachClassicLinkVpcResult; import com.amazonaws.services.ec2.model.AttachInternetGatewayRequest; +import com.amazonaws.services.ec2.model.AttachInternetGatewayResult; import com.amazonaws.services.ec2.model.AttachNetworkInterfaceRequest; import com.amazonaws.services.ec2.model.AttachNetworkInterfaceResult; import com.amazonaws.services.ec2.model.AttachVolumeRequest; @@ -47,13 +60,17 @@ import com.amazonaws.services.ec2.model.AttachVpnGatewayRequest; import com.amazonaws.services.ec2.model.AttachVpnGatewayResult; import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupEgressRequest; +import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupEgressResult; import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressRequest; +import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressResult; import com.amazonaws.services.ec2.model.BundleInstanceRequest; import com.amazonaws.services.ec2.model.BundleInstanceResult; import com.amazonaws.services.ec2.model.CancelBundleTaskRequest; import com.amazonaws.services.ec2.model.CancelBundleTaskResult; import com.amazonaws.services.ec2.model.CancelConversionTaskRequest; +import com.amazonaws.services.ec2.model.CancelConversionTaskResult; import com.amazonaws.services.ec2.model.CancelExportTaskRequest; +import 
com.amazonaws.services.ec2.model.CancelExportTaskResult; import com.amazonaws.services.ec2.model.CancelImportTaskRequest; import com.amazonaws.services.ec2.model.CancelImportTaskResult; import com.amazonaws.services.ec2.model.CancelReservedInstancesListingRequest; @@ -69,9 +86,17 @@ import com.amazonaws.services.ec2.model.CopySnapshotRequest; import com.amazonaws.services.ec2.model.CopySnapshotResult; import com.amazonaws.services.ec2.model.CreateCustomerGatewayRequest; +import com.amazonaws.services.ec2.model.CreateDefaultVpcResult; +import com.amazonaws.services.ec2.model.CreateDefaultVpcRequest; import com.amazonaws.services.ec2.model.CreateCustomerGatewayResult; import com.amazonaws.services.ec2.model.CreateDhcpOptionsRequest; import com.amazonaws.services.ec2.model.CreateDhcpOptionsResult; +import com.amazonaws.services.ec2.model.CreateEgressOnlyInternetGatewayRequest; +import com.amazonaws.services.ec2.model.CreateEgressOnlyInternetGatewayResult; +import com.amazonaws.services.ec2.model.CreateFpgaImageRequest; +import com.amazonaws.services.ec2.model.CreateFpgaImageResult; +import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionRequest; +import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionResult; import com.amazonaws.services.ec2.model.CreateFlowLogsRequest; import com.amazonaws.services.ec2.model.CreateFlowLogsResult; import com.amazonaws.services.ec2.model.CreateImageRequest; @@ -85,11 +110,13 @@ import com.amazonaws.services.ec2.model.CreateNatGatewayRequest; import com.amazonaws.services.ec2.model.CreateNatGatewayResult; import com.amazonaws.services.ec2.model.CreateNetworkAclEntryRequest; +import com.amazonaws.services.ec2.model.CreateNetworkAclEntryResult; import com.amazonaws.services.ec2.model.CreateNetworkAclRequest; import com.amazonaws.services.ec2.model.CreateNetworkAclResult; import com.amazonaws.services.ec2.model.CreateNetworkInterfaceRequest; import 
com.amazonaws.services.ec2.model.CreateNetworkInterfaceResult; import com.amazonaws.services.ec2.model.CreatePlacementGroupRequest; +import com.amazonaws.services.ec2.model.CreatePlacementGroupResult; import com.amazonaws.services.ec2.model.CreateReservedInstancesListingRequest; import com.amazonaws.services.ec2.model.CreateReservedInstancesListingResult; import com.amazonaws.services.ec2.model.CreateRouteRequest; @@ -105,6 +132,7 @@ import com.amazonaws.services.ec2.model.CreateSubnetRequest; import com.amazonaws.services.ec2.model.CreateSubnetResult; import com.amazonaws.services.ec2.model.CreateTagsRequest; +import com.amazonaws.services.ec2.model.CreateTagsResult; import com.amazonaws.services.ec2.model.CreateVolumeRequest; import com.amazonaws.services.ec2.model.CreateVolumeResult; import com.amazonaws.services.ec2.model.CreateVpcEndpointRequest; @@ -116,37 +144,63 @@ import com.amazonaws.services.ec2.model.CreateVpnConnectionRequest; import com.amazonaws.services.ec2.model.CreateVpnConnectionResult; import com.amazonaws.services.ec2.model.CreateVpnConnectionRouteRequest; +import com.amazonaws.services.ec2.model.CreateVpnConnectionRouteResult; import com.amazonaws.services.ec2.model.CreateVpnGatewayRequest; import com.amazonaws.services.ec2.model.CreateVpnGatewayResult; import com.amazonaws.services.ec2.model.DeleteCustomerGatewayRequest; +import com.amazonaws.services.ec2.model.DeleteCustomerGatewayResult; import com.amazonaws.services.ec2.model.DeleteDhcpOptionsRequest; +import com.amazonaws.services.ec2.model.DeleteDhcpOptionsResult; +import com.amazonaws.services.ec2.model.DeleteEgressOnlyInternetGatewayRequest; +import com.amazonaws.services.ec2.model.DeleteEgressOnlyInternetGatewayResult; +import com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionRequest; +import com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionResult; import com.amazonaws.services.ec2.model.DeleteFlowLogsRequest; import 
com.amazonaws.services.ec2.model.DeleteFlowLogsResult; import com.amazonaws.services.ec2.model.DeleteInternetGatewayRequest; +import com.amazonaws.services.ec2.model.DeleteInternetGatewayResult; import com.amazonaws.services.ec2.model.DeleteKeyPairRequest; +import com.amazonaws.services.ec2.model.DeleteKeyPairResult; import com.amazonaws.services.ec2.model.DeleteNatGatewayRequest; import com.amazonaws.services.ec2.model.DeleteNatGatewayResult; import com.amazonaws.services.ec2.model.DeleteNetworkAclEntryRequest; +import com.amazonaws.services.ec2.model.DeleteNetworkAclEntryResult; import com.amazonaws.services.ec2.model.DeleteNetworkAclRequest; +import com.amazonaws.services.ec2.model.DeleteNetworkAclResult; import com.amazonaws.services.ec2.model.DeleteNetworkInterfaceRequest; +import com.amazonaws.services.ec2.model.DeleteNetworkInterfaceResult; import com.amazonaws.services.ec2.model.DeletePlacementGroupRequest; +import com.amazonaws.services.ec2.model.DeletePlacementGroupResult; import com.amazonaws.services.ec2.model.DeleteRouteRequest; +import com.amazonaws.services.ec2.model.DeleteRouteResult; import com.amazonaws.services.ec2.model.DeleteRouteTableRequest; +import com.amazonaws.services.ec2.model.DeleteRouteTableResult; import com.amazonaws.services.ec2.model.DeleteSecurityGroupRequest; +import com.amazonaws.services.ec2.model.DeleteSecurityGroupResult; import com.amazonaws.services.ec2.model.DeleteSnapshotRequest; +import com.amazonaws.services.ec2.model.DeleteSnapshotResult; import com.amazonaws.services.ec2.model.DeleteSpotDatafeedSubscriptionRequest; +import com.amazonaws.services.ec2.model.DeleteSpotDatafeedSubscriptionResult; import com.amazonaws.services.ec2.model.DeleteSubnetRequest; +import com.amazonaws.services.ec2.model.DeleteSubnetResult; import com.amazonaws.services.ec2.model.DeleteTagsRequest; +import com.amazonaws.services.ec2.model.DeleteTagsResult; import com.amazonaws.services.ec2.model.DeleteVolumeRequest; +import 
com.amazonaws.services.ec2.model.DeleteVolumeResult; import com.amazonaws.services.ec2.model.DeleteVpcEndpointsRequest; import com.amazonaws.services.ec2.model.DeleteVpcEndpointsResult; import com.amazonaws.services.ec2.model.DeleteVpcPeeringConnectionRequest; import com.amazonaws.services.ec2.model.DeleteVpcPeeringConnectionResult; import com.amazonaws.services.ec2.model.DeleteVpcRequest; +import com.amazonaws.services.ec2.model.DeleteVpcResult; import com.amazonaws.services.ec2.model.DeleteVpnConnectionRequest; +import com.amazonaws.services.ec2.model.DeleteVpnConnectionResult; import com.amazonaws.services.ec2.model.DeleteVpnConnectionRouteRequest; +import com.amazonaws.services.ec2.model.DeleteVpnConnectionRouteResult; import com.amazonaws.services.ec2.model.DeleteVpnGatewayRequest; +import com.amazonaws.services.ec2.model.DeleteVpnGatewayResult; import com.amazonaws.services.ec2.model.DeregisterImageRequest; +import com.amazonaws.services.ec2.model.DeregisterImageResult; import com.amazonaws.services.ec2.model.DescribeAccountAttributesRequest; import com.amazonaws.services.ec2.model.DescribeAccountAttributesResult; import com.amazonaws.services.ec2.model.DescribeAddressesRequest; @@ -163,12 +217,26 @@ import com.amazonaws.services.ec2.model.DescribeCustomerGatewaysResult; import com.amazonaws.services.ec2.model.DescribeDhcpOptionsRequest; import com.amazonaws.services.ec2.model.DescribeDhcpOptionsResult; +import com.amazonaws.services.ec2.model.DescribeEgressOnlyInternetGatewaysRequest; +import com.amazonaws.services.ec2.model.DescribeEgressOnlyInternetGatewaysResult; import com.amazonaws.services.ec2.model.DescribeExportTasksRequest; import com.amazonaws.services.ec2.model.DescribeExportTasksResult; +import com.amazonaws.services.ec2.model.DescribeElasticGpusRequest; +import com.amazonaws.services.ec2.model.DescribeElasticGpusResult; +import com.amazonaws.services.ec2.model.DescribeFpgaImagesRequest; +import 
com.amazonaws.services.ec2.model.DescribeFpgaImagesResult; +import com.amazonaws.services.ec2.model.DescribeHostReservationOfferingsRequest; +import com.amazonaws.services.ec2.model.DescribeHostReservationOfferingsResult; +import com.amazonaws.services.ec2.model.DescribeHostReservationsRequest; +import com.amazonaws.services.ec2.model.DescribeHostReservationsResult; +import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatRequest; +import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatResult; import com.amazonaws.services.ec2.model.DescribeFlowLogsRequest; import com.amazonaws.services.ec2.model.DescribeFlowLogsResult; import com.amazonaws.services.ec2.model.DescribeHostsRequest; import com.amazonaws.services.ec2.model.DescribeHostsResult; +import com.amazonaws.services.ec2.model.DescribeIamInstanceProfileAssociationsRequest; +import com.amazonaws.services.ec2.model.DescribeIamInstanceProfileAssociationsResult; import com.amazonaws.services.ec2.model.DescribeIdFormatRequest; import com.amazonaws.services.ec2.model.DescribeIdFormatResult; import com.amazonaws.services.ec2.model.DescribeImageAttributeRequest; @@ -199,6 +267,8 @@ import com.amazonaws.services.ec2.model.DescribeNetworkInterfaceAttributeResult; import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesRequest; import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesResult; +import com.amazonaws.services.ec2.model.DescribeNetworkInterfacePermissionsRequest; +import com.amazonaws.services.ec2.model.DescribeNetworkInterfacePermissionsResult; import com.amazonaws.services.ec2.model.DescribePlacementGroupsRequest; import com.amazonaws.services.ec2.model.DescribePlacementGroupsResult; import com.amazonaws.services.ec2.model.DescribePrefixListsRequest; @@ -221,6 +291,10 @@ import com.amazonaws.services.ec2.model.DescribeScheduledInstancesResult; import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; import 
com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; +import com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsRequest; +import com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsResult; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupReferencesRequest; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupReferencesResult; import com.amazonaws.services.ec2.model.DescribeSnapshotAttributeRequest; import com.amazonaws.services.ec2.model.DescribeSnapshotAttributeResult; import com.amazonaws.services.ec2.model.DescribeSnapshotsRequest; @@ -245,6 +319,8 @@ import com.amazonaws.services.ec2.model.DescribeVolumeAttributeResult; import com.amazonaws.services.ec2.model.DescribeVolumeStatusRequest; import com.amazonaws.services.ec2.model.DescribeVolumeStatusResult; +import com.amazonaws.services.ec2.model.DescribeVolumesModificationsRequest; +import com.amazonaws.services.ec2.model.DescribeVolumesModificationsResult; import com.amazonaws.services.ec2.model.DescribeVolumesRequest; import com.amazonaws.services.ec2.model.DescribeVolumesResult; import com.amazonaws.services.ec2.model.DescribeVpcAttributeRequest; @@ -268,21 +344,35 @@ import com.amazonaws.services.ec2.model.DetachClassicLinkVpcRequest; import com.amazonaws.services.ec2.model.DetachClassicLinkVpcResult; import com.amazonaws.services.ec2.model.DetachInternetGatewayRequest; +import com.amazonaws.services.ec2.model.DetachInternetGatewayResult; import com.amazonaws.services.ec2.model.DetachNetworkInterfaceRequest; +import com.amazonaws.services.ec2.model.DetachNetworkInterfaceResult; import com.amazonaws.services.ec2.model.DetachVolumeRequest; import com.amazonaws.services.ec2.model.DetachVolumeResult; import com.amazonaws.services.ec2.model.DetachVpnGatewayRequest; +import com.amazonaws.services.ec2.model.DetachVpnGatewayResult; import com.amazonaws.services.ec2.model.DisableVgwRoutePropagationRequest; +import 
com.amazonaws.services.ec2.model.DisableVgwRoutePropagationResult; import com.amazonaws.services.ec2.model.DisableVpcClassicLinkDnsSupportRequest; import com.amazonaws.services.ec2.model.DisableVpcClassicLinkDnsSupportResult; import com.amazonaws.services.ec2.model.DisableVpcClassicLinkRequest; import com.amazonaws.services.ec2.model.DisableVpcClassicLinkResult; import com.amazonaws.services.ec2.model.DisassociateAddressRequest; +import com.amazonaws.services.ec2.model.DisassociateAddressResult; import com.amazonaws.services.ec2.model.DisassociateRouteTableRequest; +import com.amazonaws.services.ec2.model.DisassociateRouteTableResult; +import com.amazonaws.services.ec2.model.DisassociateIamInstanceProfileRequest; +import com.amazonaws.services.ec2.model.DisassociateIamInstanceProfileResult; +import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockRequest; +import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockResult; +import com.amazonaws.services.ec2.model.DisassociateSubnetCidrBlockRequest; +import com.amazonaws.services.ec2.model.DisassociateSubnetCidrBlockResult; import com.amazonaws.services.ec2.model.DryRunResult; import com.amazonaws.services.ec2.model.DryRunSupportedRequest; import com.amazonaws.services.ec2.model.EnableVgwRoutePropagationRequest; +import com.amazonaws.services.ec2.model.EnableVgwRoutePropagationResult; import com.amazonaws.services.ec2.model.EnableVolumeIORequest; +import com.amazonaws.services.ec2.model.EnableVolumeIOResult; import com.amazonaws.services.ec2.model.EnableVpcClassicLinkDnsSupportRequest; import com.amazonaws.services.ec2.model.EnableVpcClassicLinkDnsSupportResult; import com.amazonaws.services.ec2.model.EnableVpcClassicLinkRequest; @@ -290,8 +380,14 @@ import com.amazonaws.services.ec2.model.Filter; import com.amazonaws.services.ec2.model.GetConsoleOutputRequest; import com.amazonaws.services.ec2.model.GetConsoleOutputResult; +import com.amazonaws.services.ec2.model.GetConsoleScreenshotRequest; +import 
com.amazonaws.services.ec2.model.GetConsoleScreenshotResult; +import com.amazonaws.services.ec2.model.GetHostReservationPurchasePreviewRequest; +import com.amazonaws.services.ec2.model.GetHostReservationPurchasePreviewResult; import com.amazonaws.services.ec2.model.GetPasswordDataRequest; import com.amazonaws.services.ec2.model.GetPasswordDataResult; +import com.amazonaws.services.ec2.model.GetReservedInstancesExchangeQuoteRequest; +import com.amazonaws.services.ec2.model.GetReservedInstancesExchangeQuoteResult; import com.amazonaws.services.ec2.model.ImportImageRequest; import com.amazonaws.services.ec2.model.ImportImageResult; import com.amazonaws.services.ec2.model.ImportInstanceRequest; @@ -308,19 +404,31 @@ import com.amazonaws.services.ec2.model.ModifyHostsRequest; import com.amazonaws.services.ec2.model.ModifyHostsResult; import com.amazonaws.services.ec2.model.ModifyIdFormatRequest; +import com.amazonaws.services.ec2.model.ModifyIdFormatResult; import com.amazonaws.services.ec2.model.ModifyImageAttributeRequest; +import com.amazonaws.services.ec2.model.ModifyImageAttributeResult; import com.amazonaws.services.ec2.model.ModifyInstanceAttributeRequest; +import com.amazonaws.services.ec2.model.ModifyInstanceAttributeResult; import com.amazonaws.services.ec2.model.ModifyInstancePlacementRequest; import com.amazonaws.services.ec2.model.ModifyInstancePlacementResult; +import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatRequest; +import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatResult; import com.amazonaws.services.ec2.model.ModifyNetworkInterfaceAttributeRequest; +import com.amazonaws.services.ec2.model.ModifyNetworkInterfaceAttributeResult; import com.amazonaws.services.ec2.model.ModifyReservedInstancesRequest; import com.amazonaws.services.ec2.model.ModifyReservedInstancesResult; import com.amazonaws.services.ec2.model.ModifySnapshotAttributeRequest; +import com.amazonaws.services.ec2.model.ModifySnapshotAttributeResult; import 
com.amazonaws.services.ec2.model.ModifySpotFleetRequestRequest; import com.amazonaws.services.ec2.model.ModifySpotFleetRequestResult; import com.amazonaws.services.ec2.model.ModifySubnetAttributeRequest; +import com.amazonaws.services.ec2.model.ModifySubnetAttributeResult; import com.amazonaws.services.ec2.model.ModifyVolumeAttributeRequest; +import com.amazonaws.services.ec2.model.ModifyVolumeAttributeResult; +import com.amazonaws.services.ec2.model.ModifyVolumeRequest; +import com.amazonaws.services.ec2.model.ModifyVolumeResult; import com.amazonaws.services.ec2.model.ModifyVpcAttributeRequest; +import com.amazonaws.services.ec2.model.ModifyVpcAttributeResult; import com.amazonaws.services.ec2.model.ModifyVpcEndpointRequest; import com.amazonaws.services.ec2.model.ModifyVpcEndpointResult; import com.amazonaws.services.ec2.model.MonitorInstancesRequest; @@ -331,34 +439,51 @@ import com.amazonaws.services.ec2.model.PurchaseReservedInstancesOfferingResult; import com.amazonaws.services.ec2.model.PurchaseScheduledInstancesRequest; import com.amazonaws.services.ec2.model.PurchaseScheduledInstancesResult; +import com.amazonaws.services.ec2.model.PurchaseHostReservationRequest; +import com.amazonaws.services.ec2.model.PurchaseHostReservationResult; import com.amazonaws.services.ec2.model.RebootInstancesRequest; +import com.amazonaws.services.ec2.model.RebootInstancesResult; import com.amazonaws.services.ec2.model.RegisterImageRequest; import com.amazonaws.services.ec2.model.RegisterImageResult; import com.amazonaws.services.ec2.model.RejectVpcPeeringConnectionRequest; import com.amazonaws.services.ec2.model.RejectVpcPeeringConnectionResult; +import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsRequest; +import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsResult; import com.amazonaws.services.ec2.model.ReleaseAddressRequest; +import com.amazonaws.services.ec2.model.ReleaseAddressResult; import 
com.amazonaws.services.ec2.model.ReleaseHostsRequest; import com.amazonaws.services.ec2.model.ReleaseHostsResult; +import com.amazonaws.services.ec2.model.ReplaceIamInstanceProfileAssociationRequest; +import com.amazonaws.services.ec2.model.ReplaceIamInstanceProfileAssociationResult; import com.amazonaws.services.ec2.model.ReplaceNetworkAclAssociationRequest; import com.amazonaws.services.ec2.model.ReplaceNetworkAclAssociationResult; import com.amazonaws.services.ec2.model.ReplaceNetworkAclEntryRequest; +import com.amazonaws.services.ec2.model.ReplaceNetworkAclEntryResult; import com.amazonaws.services.ec2.model.ReplaceRouteRequest; +import com.amazonaws.services.ec2.model.ReplaceRouteResult; import com.amazonaws.services.ec2.model.ReplaceRouteTableAssociationRequest; import com.amazonaws.services.ec2.model.ReplaceRouteTableAssociationResult; import com.amazonaws.services.ec2.model.ReportInstanceStatusRequest; +import com.amazonaws.services.ec2.model.ReportInstanceStatusResult; import com.amazonaws.services.ec2.model.RequestSpotFleetRequest; import com.amazonaws.services.ec2.model.RequestSpotFleetResult; import com.amazonaws.services.ec2.model.RequestSpotInstancesRequest; import com.amazonaws.services.ec2.model.RequestSpotInstancesResult; import com.amazonaws.services.ec2.model.Reservation; import com.amazonaws.services.ec2.model.ResetImageAttributeRequest; +import com.amazonaws.services.ec2.model.ResetImageAttributeResult; import com.amazonaws.services.ec2.model.ResetInstanceAttributeRequest; +import com.amazonaws.services.ec2.model.ResetInstanceAttributeResult; import com.amazonaws.services.ec2.model.ResetNetworkInterfaceAttributeRequest; +import com.amazonaws.services.ec2.model.ResetNetworkInterfaceAttributeResult; import com.amazonaws.services.ec2.model.ResetSnapshotAttributeRequest; +import com.amazonaws.services.ec2.model.ResetSnapshotAttributeResult; import com.amazonaws.services.ec2.model.RestoreAddressToClassicRequest; import 
com.amazonaws.services.ec2.model.RestoreAddressToClassicResult; import com.amazonaws.services.ec2.model.RevokeSecurityGroupEgressRequest; +import com.amazonaws.services.ec2.model.RevokeSecurityGroupEgressResult; import com.amazonaws.services.ec2.model.RevokeSecurityGroupIngressRequest; +import com.amazonaws.services.ec2.model.RevokeSecurityGroupIngressResult; import com.amazonaws.services.ec2.model.RunInstancesRequest; import com.amazonaws.services.ec2.model.RunInstancesResult; import com.amazonaws.services.ec2.model.RunScheduledInstancesRequest; @@ -370,9 +495,17 @@ import com.amazonaws.services.ec2.model.Tag; import com.amazonaws.services.ec2.model.TerminateInstancesRequest; import com.amazonaws.services.ec2.model.TerminateInstancesResult; +import com.amazonaws.services.ec2.model.UnassignIpv6AddressesRequest; +import com.amazonaws.services.ec2.model.UnassignIpv6AddressesResult; import com.amazonaws.services.ec2.model.UnassignPrivateIpAddressesRequest; +import com.amazonaws.services.ec2.model.UnassignPrivateIpAddressesResult; import com.amazonaws.services.ec2.model.UnmonitorInstancesRequest; import com.amazonaws.services.ec2.model.UnmonitorInstancesResult; +import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsEgressRequest; +import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsEgressResult; +import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsIngressRequest; +import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsIngressResult; +import com.amazonaws.services.ec2.waiters.AmazonEC2Waiters; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.ESLoggerFactory; @@ -518,7 +651,13 @@ public void setRegion(Region region) throws IllegalArgumentException { } @Override - public void rebootInstances(RebootInstancesRequest rebootInstancesRequest) throws AmazonServiceException, AmazonClientException { + public AcceptReservedInstancesExchangeQuoteResult 
acceptReservedInstancesExchangeQuote( + AcceptReservedInstancesExchangeQuoteRequest acceptReservedInstancesExchangeQuoteRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public RebootInstancesResult rebootInstances(RebootInstancesRequest rebootInstancesRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -548,7 +687,7 @@ public DetachVolumeResult detachVolume(DetachVolumeRequest detachVolumeRequest) } @Override - public void deleteKeyPair(DeleteKeyPairRequest deleteKeyPairRequest) throws AmazonServiceException, AmazonClientException { + public DeleteKeyPairResult deleteKeyPair(DeleteKeyPairRequest deleteKeyPairRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -562,6 +701,16 @@ public UnmonitorInstancesResult unmonitorInstances(UnmonitorInstancesRequest unm throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public UpdateSecurityGroupRuleDescriptionsIngressResult updateSecurityGroupRuleDescriptionsIngress(UpdateSecurityGroupRuleDescriptionsIngressRequest updateSecurityGroupRuleDescriptionsIngressRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public UpdateSecurityGroupRuleDescriptionsEgressResult updateSecurityGroupRuleDescriptionsEgress(UpdateSecurityGroupRuleDescriptionsEgressRequest updateSecurityGroupRuleDescriptionsEgressRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public AttachVpnGatewayResult attachVpnGateway(AttachVpnGatewayRequest attachVpnGatewayRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not 
supported in mock"); @@ -573,7 +722,7 @@ public CreateImageResult createImage(CreateImageRequest createImageRequest) thro } @Override - public void deleteSecurityGroup(DeleteSecurityGroupRequest deleteSecurityGroupRequest) throws AmazonServiceException, AmazonClientException { + public DeleteSecurityGroupResult deleteSecurityGroup(DeleteSecurityGroupRequest deleteSecurityGroupRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -583,12 +732,12 @@ public CreateInstanceExportTaskResult createInstanceExportTask(CreateInstanceExp } @Override - public void authorizeSecurityGroupEgress(AuthorizeSecurityGroupEgressRequest authorizeSecurityGroupEgressRequest) throws AmazonServiceException, AmazonClientException { + public AuthorizeSecurityGroupEgressResult authorizeSecurityGroupEgress(AuthorizeSecurityGroupEgressRequest authorizeSecurityGroupEgressRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void associateDhcpOptions(AssociateDhcpOptionsRequest associateDhcpOptionsRequest) throws AmazonServiceException, AmazonClientException { + public AssociateDhcpOptionsResult associateDhcpOptions(AssociateDhcpOptionsRequest associateDhcpOptionsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -597,6 +746,11 @@ public GetPasswordDataResult getPasswordData(GetPasswordDataRequest getPasswordD throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public GetReservedInstancesExchangeQuoteResult getReservedInstancesExchangeQuote(GetReservedInstancesExchangeQuoteRequest getReservedInstancesExchangeQuoteRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public StopInstancesResult 
stopInstances(StopInstancesRequest stopInstancesRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); @@ -608,12 +762,12 @@ public ImportKeyPairResult importKeyPair(ImportKeyPairRequest importKeyPairReque } @Override - public void deleteNetworkInterface(DeleteNetworkInterfaceRequest deleteNetworkInterfaceRequest) throws AmazonServiceException, AmazonClientException { + public DeleteNetworkInterfaceResult deleteNetworkInterface(DeleteNetworkInterfaceRequest deleteNetworkInterfaceRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void modifyVpcAttribute(ModifyVpcAttributeRequest modifyVpcAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ModifyVpcAttributeResult modifyVpcAttribute(ModifyVpcAttributeRequest modifyVpcAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -637,6 +791,11 @@ public DescribeNetworkInterfacesResult describeNetworkInterfaces(DescribeNetwork throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public DescribeNetworkInterfacePermissionsResult describeNetworkInterfacePermissions(DescribeNetworkInterfacePermissionsRequest describeNetworkInterfacePermissionsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public DescribeRegionsResult describeRegions(DescribeRegionsRequest describeRegionsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); @@ -658,12 +817,12 @@ public DeleteVpcEndpointsResult deleteVpcEndpoints(DeleteVpcEndpointsRequest del } @Override - public void resetSnapshotAttribute(ResetSnapshotAttributeRequest resetSnapshotAttributeRequest) throws 
AmazonServiceException, AmazonClientException { + public ResetSnapshotAttributeResult resetSnapshotAttribute(ResetSnapshotAttributeRequest resetSnapshotAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteRoute(DeleteRouteRequest deleteRouteRequest) throws AmazonServiceException, AmazonClientException { + public DeleteRouteResult deleteRoute(DeleteRouteRequest deleteRouteRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -683,7 +842,7 @@ public ModifyHostsResult modifyHosts(ModifyHostsRequest modifyHostsRequest) { } @Override - public void modifyIdFormat(ModifyIdFormatRequest modifyIdFormatRequest) { + public ModifyIdFormatResult modifyIdFormat(ModifyIdFormatRequest modifyIdFormatRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @@ -692,23 +851,38 @@ public DescribeSecurityGroupsResult describeSecurityGroups(DescribeSecurityGroup throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public DescribeStaleSecurityGroupsResult describeStaleSecurityGroups(DescribeStaleSecurityGroupsRequest describeStaleSecurityGroupsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeSecurityGroupReferencesResult describeSecurityGroupReferences(DescribeSecurityGroupReferencesRequest describeSecurityGroupReferencesRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public RejectVpcPeeringConnectionResult rejectVpcPeeringConnection(RejectVpcPeeringConnectionRequest rejectVpcPeeringConnectionRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } + 
@Override + public ModifyVpcPeeringConnectionOptionsResult modifyVpcPeeringConnectionOptions(ModifyVpcPeeringConnectionOptionsRequest modifyVpcPeeringConnectionOptionsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public DeleteFlowLogsResult deleteFlowLogs(DeleteFlowLogsRequest deleteFlowLogsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void detachVpnGateway(DetachVpnGatewayRequest detachVpnGatewayRequest) throws AmazonServiceException, AmazonClientException { + public DetachVpnGatewayResult detachVpnGateway(DetachVpnGatewayRequest detachVpnGatewayRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deregisterImage(DeregisterImageRequest deregisterImageRequest) throws AmazonServiceException, AmazonClientException { + public DeregisterImageResult deregisterImage(DeregisterImageRequest deregisterImageRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -718,12 +892,12 @@ public DescribeSpotDatafeedSubscriptionResult describeSpotDatafeedSubscription(D } @Override - public void deleteTags(DeleteTagsRequest deleteTagsRequest) throws AmazonServiceException, AmazonClientException { + public DeleteTagsResult deleteTags(DeleteTagsRequest deleteTagsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteSubnet(DeleteSubnetRequest deleteSubnetRequest) throws AmazonServiceException, AmazonClientException { + public DeleteSubnetResult deleteSubnet(DeleteSubnetRequest deleteSubnetRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not 
supported in mock"); } @@ -743,7 +917,7 @@ public CreateVpnGatewayResult createVpnGateway(CreateVpnGatewayRequest createVpn } @Override - public void enableVolumeIO(EnableVolumeIORequest enableVolumeIORequest) throws AmazonServiceException, AmazonClientException { + public EnableVolumeIOResult enableVolumeIO(EnableVolumeIORequest enableVolumeIORequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -753,7 +927,7 @@ public MoveAddressToVpcResult moveAddressToVpc(MoveAddressToVpcRequest moveAddre } @Override - public void deleteVpnGateway(DeleteVpnGatewayRequest deleteVpnGatewayRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVpnGatewayResult deleteVpnGateway(DeleteVpnGatewayRequest deleteVpnGatewayRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -767,6 +941,11 @@ public DescribeVolumeStatusResult describeVolumeStatus(DescribeVolumeStatusReque throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public DescribeVolumesModificationsResult describeVolumesModifications(DescribeVolumesModificationsRequest describeVolumesModificationsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public DescribeImportSnapshotTasksResult describeImportSnapshotTasks(DescribeImportSnapshotTasksRequest describeImportSnapshotTasksRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); @@ -778,12 +957,12 @@ public DescribeVpnConnectionsResult describeVpnConnections(DescribeVpnConnection } @Override - public void resetImageAttribute(ResetImageAttributeRequest resetImageAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ResetImageAttributeResult 
resetImageAttribute(ResetImageAttributeRequest resetImageAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void enableVgwRoutePropagation(EnableVgwRoutePropagationRequest enableVgwRoutePropagationRequest) throws AmazonServiceException, AmazonClientException { + public EnableVgwRoutePropagationResult enableVgwRoutePropagation(EnableVgwRoutePropagationRequest enableVgwRoutePropagationRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -793,7 +972,7 @@ public CreateSnapshotResult createSnapshot(CreateSnapshotRequest createSnapshotR } @Override - public void deleteVolume(DeleteVolumeRequest deleteVolumeRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVolumeResult deleteVolume(DeleteVolumeRequest deleteVolumeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -813,7 +992,12 @@ public CancelSpotFleetRequestsResult cancelSpotFleetRequests(CancelSpotFleetRequ } @Override - public void unassignPrivateIpAddresses(UnassignPrivateIpAddressesRequest unassignPrivateIpAddressesRequest) throws AmazonServiceException, AmazonClientException { + public UnassignPrivateIpAddressesResult unassignPrivateIpAddresses(UnassignPrivateIpAddressesRequest unassignPrivateIpAddressesRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public UnassignIpv6AddressesResult unassignIpv6Addresses(UnassignIpv6AddressesRequest unassignIpv6AddressesRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -823,7 +1007,7 @@ public DescribeVpcsResult describeVpcs(DescribeVpcsRequest describeVpcsRequest) } @Override - public void 
cancelConversionTask(CancelConversionTaskRequest cancelConversionTaskRequest) throws AmazonServiceException, AmazonClientException { + public CancelConversionTaskResult cancelConversionTask(CancelConversionTaskRequest cancelConversionTaskRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -833,12 +1017,27 @@ public AssociateAddressResult associateAddress(AssociateAddressRequest associate } @Override - public void deleteCustomerGateway(DeleteCustomerGatewayRequest deleteCustomerGatewayRequest) throws AmazonServiceException, AmazonClientException { + public AssociateIamInstanceProfileResult associateIamInstanceProfile(AssociateIamInstanceProfileRequest associateIamInstanceRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AssociateVpcCidrBlockResult associateVpcCidrBlock(AssociateVpcCidrBlockRequest associateVpcCidrBlockRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AssociateSubnetCidrBlockResult associateSubnetCidrBlock(AssociateSubnetCidrBlockRequest associateSubnetCidrBlockRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void createNetworkAclEntry(CreateNetworkAclEntryRequest createNetworkAclEntryRequest) throws AmazonServiceException, AmazonClientException { + public DeleteCustomerGatewayResult deleteCustomerGateway(DeleteCustomerGatewayRequest deleteCustomerGatewayRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateNetworkAclEntryResult createNetworkAclEntry(CreateNetworkAclEntryRequest createNetworkAclEntryRequest) throws AmazonServiceException, 
AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -853,7 +1052,32 @@ public DescribeExportTasksResult describeExportTasks(DescribeExportTasksRequest } @Override - public void detachInternetGateway(DetachInternetGatewayRequest detachInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { + public DescribeElasticGpusResult describeElasticGpus(DescribeElasticGpusRequest describeElasticGpusRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeFpgaImagesResult describeFpgaImages(DescribeFpgaImagesRequest describeFpgaImagesRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeHostReservationOfferingsResult describeHostReservationOfferings(DescribeHostReservationOfferingsRequest describeHostReservationOfferingsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeHostReservationsResult describeHostReservations(DescribeHostReservationsRequest describeHostReservationsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeIdentityIdFormatResult describeIdentityIdFormat(DescribeIdentityIdFormatRequest describeIdentityIdFormatRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DetachInternetGatewayResult detachInternetGateway(DetachInternetGatewayRequest detachInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -883,7 +1107,7 @@ public 
DescribeReservedInstancesListingsResult describeReservedInstancesListings } @Override - public void reportInstanceStatus(ReportInstanceStatusRequest reportInstanceStatusRequest) throws AmazonServiceException, AmazonClientException { + public ReportInstanceStatusResult reportInstanceStatus(ReportInstanceStatusRequest reportInstanceStatusRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -896,6 +1120,12 @@ public DescribeRouteTablesResult describeRouteTables(DescribeRouteTablesRequest public DescribeDhcpOptionsResult describeDhcpOptions(DescribeDhcpOptionsRequest describeDhcpOptionsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } + + @Override + public DescribeEgressOnlyInternetGatewaysResult describeEgressOnlyInternetGateways( + DescribeEgressOnlyInternetGatewaysRequest describeEgressOnlyInternetGatewaysRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } @Override public MonitorInstancesResult monitorInstances(MonitorInstancesRequest monitorInstancesRequest) throws AmazonServiceException, AmazonClientException { @@ -933,17 +1163,22 @@ public ImportInstanceResult importInstance(ImportInstanceRequest importInstanceR } @Override - public void revokeSecurityGroupIngress(RevokeSecurityGroupIngressRequest revokeSecurityGroupIngressRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVpcPeeringConnectionResult deleteVpcPeeringConnection(DeleteVpcPeeringConnectionRequest deleteVpcPeeringConnectionRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DeleteVpcPeeringConnectionResult deleteVpcPeeringConnection(DeleteVpcPeeringConnectionRequest deleteVpcPeeringConnectionRequest) throws AmazonServiceException, 
AmazonClientException { + public GetConsoleOutputResult getConsoleOutput(GetConsoleOutputRequest getConsoleOutputRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public GetConsoleOutputResult getConsoleOutput(GetConsoleOutputRequest getConsoleOutputRequest) throws AmazonServiceException, AmazonClientException { + public GetConsoleScreenshotResult getConsoleScreenshot(GetConsoleScreenshotRequest getConsoleScreenshotRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public GetHostReservationPurchasePreviewResult getHostReservationPurchasePreview(GetHostReservationPurchasePreviewRequest getHostReservationPurchasePreviewRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -953,17 +1188,17 @@ public CreateInternetGatewayResult createInternetGateway(CreateInternetGatewayRe } @Override - public void deleteVpnConnectionRoute(DeleteVpnConnectionRouteRequest deleteVpnConnectionRouteRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVpnConnectionRouteResult deleteVpnConnectionRoute(DeleteVpnConnectionRouteRequest deleteVpnConnectionRouteRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void detachNetworkInterface(DetachNetworkInterfaceRequest detachNetworkInterfaceRequest) throws AmazonServiceException, AmazonClientException { + public DetachNetworkInterfaceResult detachNetworkInterface(DetachNetworkInterfaceRequest detachNetworkInterfaceRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void modifyImageAttribute(ModifyImageAttributeRequest modifyImageAttributeRequest) throws 
AmazonServiceException, AmazonClientException { + public ModifyImageAttributeResult modifyImageAttribute(ModifyImageAttributeRequest modifyImageAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -972,18 +1207,38 @@ public CreateCustomerGatewayResult createCustomerGateway(CreateCustomerGatewayRe throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public CreateEgressOnlyInternetGatewayResult createEgressOnlyInternetGateway(CreateEgressOnlyInternetGatewayRequest createEgressOnlyInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateFpgaImageResult createFpgaImage(CreateFpgaImageRequest createFpgaImageRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateNetworkInterfacePermissionResult createNetworkInterfacePermission(CreateNetworkInterfacePermissionRequest createNetworkInterfacePermissionRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateDefaultVpcResult createDefaultVpc(CreateDefaultVpcRequest createDefaultVpcRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public CreateSpotDatafeedSubscriptionResult createSpotDatafeedSubscription(CreateSpotDatafeedSubscriptionRequest createSpotDatafeedSubscriptionRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void attachInternetGateway(AttachInternetGatewayRequest attachInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { + public 
AttachInternetGatewayResult attachInternetGateway(AttachInternetGatewayRequest attachInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteVpnConnection(DeleteVpnConnectionRequest deleteVpnConnectionRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVpnConnectionResult deleteVpnConnection(DeleteVpnConnectionRequest deleteVpnConnectionRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1058,12 +1313,12 @@ public AssociateRouteTableResult associateRouteTable(AssociateRouteTableRequest } @Override - public void modifyVolumeAttribute(ModifyVolumeAttributeRequest modifyVolumeAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ModifyVolumeAttributeResult modifyVolumeAttribute(ModifyVolumeAttributeRequest modifyVolumeAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteNetworkAcl(DeleteNetworkAclRequest deleteNetworkAclRequest) throws AmazonServiceException, AmazonClientException { + public DeleteNetworkAclResult deleteNetworkAcl(DeleteNetworkAclRequest deleteNetworkAclRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1078,7 +1333,7 @@ public StartInstancesResult startInstances(StartInstancesRequest startInstancesR } @Override - public void modifyInstanceAttribute(ModifyInstanceAttributeRequest modifyInstanceAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ModifyInstanceAttributeResult modifyInstanceAttribute(ModifyInstanceAttributeRequest modifyInstanceAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported 
in mock"); } @@ -1087,18 +1342,33 @@ public ModifyInstancePlacementResult modifyInstancePlacement(ModifyInstancePlace throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public ModifyIdentityIdFormatResult modifyIdentityIdFormat(ModifyIdentityIdFormatRequest modifyIdentityIdFormatRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public CancelReservedInstancesListingResult cancelReservedInstancesListing(CancelReservedInstancesListingRequest cancelReservedInstancesListingRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteDhcpOptions(DeleteDhcpOptionsRequest deleteDhcpOptionsRequest) throws AmazonServiceException, AmazonClientException { + public DeleteDhcpOptionsResult deleteDhcpOptions(DeleteDhcpOptionsRequest deleteDhcpOptionsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void authorizeSecurityGroupIngress(AuthorizeSecurityGroupIngressRequest authorizeSecurityGroupIngressRequest) throws AmazonServiceException, AmazonClientException { + public DeleteEgressOnlyInternetGatewayResult deleteEgressOnlyInternetGateway(DeleteEgressOnlyInternetGatewayRequest deleteEgressOnlyInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteNetworkInterfacePermissionResult deleteNetworkInterfacePermission(DeleteNetworkInterfacePermissionRequest deleteNetworkInterfacePermissionRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AuthorizeSecurityGroupIngressResult authorizeSecurityGroupIngress(AuthorizeSecurityGroupIngressRequest authorizeSecurityGroupIngressRequest) throws 
AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1118,7 +1388,7 @@ public DescribeCustomerGatewaysResult describeCustomerGateways(DescribeCustomerG } @Override - public void cancelExportTask(CancelExportTaskRequest cancelExportTaskRequest) throws AmazonServiceException, AmazonClientException { + public CancelExportTaskResult cancelExportTask(CancelExportTaskRequest cancelExportTaskRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1143,12 +1413,12 @@ public DescribeVpcClassicLinkResult describeVpcClassicLink(DescribeVpcClassicLin } @Override - public void modifyNetworkInterfaceAttribute(ModifyNetworkInterfaceAttributeRequest modifyNetworkInterfaceAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ModifyNetworkInterfaceAttributeResult modifyNetworkInterfaceAttribute(ModifyNetworkInterfaceAttributeRequest modifyNetworkInterfaceAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteRouteTable(DeleteRouteTableRequest deleteRouteTableRequest) throws AmazonServiceException, AmazonClientException { + public DeleteRouteTableResult deleteRouteTable(DeleteRouteTableRequest deleteRouteTableRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1168,7 +1438,7 @@ public RequestSpotInstancesResult requestSpotInstances(RequestSpotInstancesReque } @Override - public void createTags(CreateTagsRequest createTagsRequest) throws AmazonServiceException, AmazonClientException { + public CreateTagsResult createTags(CreateTagsRequest createTagsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1183,7 +1453,7 @@ public 
AttachNetworkInterfaceResult attachNetworkInterface(AttachNetworkInterfac } @Override - public void replaceRoute(ReplaceRouteRequest replaceRouteRequest) throws AmazonServiceException, AmazonClientException { + public ReplaceRouteResult replaceRoute(ReplaceRouteRequest replaceRouteRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1198,7 +1468,7 @@ public CancelBundleTaskResult cancelBundleTask(CancelBundleTaskRequest cancelBun } @Override - public void disableVgwRoutePropagation(DisableVgwRoutePropagationRequest disableVgwRoutePropagationRequest) throws AmazonServiceException, AmazonClientException { + public DisableVgwRoutePropagationResult disableVgwRoutePropagation(DisableVgwRoutePropagationRequest disableVgwRoutePropagationRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1228,7 +1498,12 @@ public PurchaseScheduledInstancesResult purchaseScheduledInstances(PurchaseSched } @Override - public void modifySnapshotAttribute(ModifySnapshotAttributeRequest modifySnapshotAttributeRequest) throws AmazonServiceException, AmazonClientException { + public PurchaseHostReservationResult purchaseHostReservation(PurchaseHostReservationRequest purchaseHostReservationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifySnapshotAttributeResult modifySnapshotAttribute(ModifySnapshotAttributeRequest modifySnapshotAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1248,12 +1523,12 @@ public ModifyVpcEndpointResult modifyVpcEndpoint(ModifyVpcEndpointRequest modify } @Override - public void deleteSpotDatafeedSubscription(DeleteSpotDatafeedSubscriptionRequest deleteSpotDatafeedSubscriptionRequest) throws AmazonServiceException, AmazonClientException { + public 
DeleteSpotDatafeedSubscriptionResult deleteSpotDatafeedSubscription(DeleteSpotDatafeedSubscriptionRequest deleteSpotDatafeedSubscriptionRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteInternetGateway(DeleteInternetGatewayRequest deleteInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { + public DeleteInternetGatewayResult deleteInternetGateway(DeleteInternetGatewayRequest deleteInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1288,7 +1563,22 @@ public ConfirmProductInstanceResult confirmProductInstance(ConfirmProductInstanc } @Override - public void disassociateRouteTable(DisassociateRouteTableRequest disassociateRouteTableRequest) throws AmazonServiceException, AmazonClientException { + public DisassociateRouteTableResult disassociateRouteTable(DisassociateRouteTableRequest disassociateRouteTableRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DisassociateIamInstanceProfileResult disassociateIamInstanceProfile(DisassociateIamInstanceProfileRequest disassociateIamInstanceProfileRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DisassociateVpcCidrBlockResult disassociateVpcCidrBlock(DisassociateVpcCidrBlockRequest disassociateVpcCidrBlockRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DisassociateSubnetCidrBlockResult disassociateSubnetCidrBlock(DisassociateSubnetCidrBlockRequest disassociateSubnetCidrBlockRequest) throws AmazonServiceException, AmazonClientException { throw new 
UnsupportedOperationException("Not supported in mock"); } @@ -1298,12 +1588,12 @@ public DescribeVpcAttributeResult describeVpcAttribute(DescribeVpcAttributeReque } @Override - public void revokeSecurityGroupEgress(RevokeSecurityGroupEgressRequest revokeSecurityGroupEgressRequest) throws AmazonServiceException, AmazonClientException { + public RevokeSecurityGroupEgressResult revokeSecurityGroupEgress(RevokeSecurityGroupEgressRequest revokeSecurityGroupEgressRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteNetworkAclEntry(DeleteNetworkAclEntryRequest deleteNetworkAclEntryRequest) throws AmazonServiceException, AmazonClientException { + public DeleteNetworkAclEntryResult deleteNetworkAclEntry(DeleteNetworkAclEntryRequest deleteNetworkAclEntryRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1312,6 +1602,11 @@ public CreateVolumeResult createVolume(CreateVolumeRequest createVolumeRequest) throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public ModifyVolumeResult modifyVolume(ModifyVolumeRequest modifyVolumeRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public DescribeInstanceStatusResult describeInstanceStatus(DescribeInstanceStatusRequest describeInstanceStatusRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); @@ -1333,7 +1628,12 @@ public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferin } @Override - public void assignPrivateIpAddresses(AssignPrivateIpAddressesRequest assignPrivateIpAddressesRequest) throws AmazonServiceException, AmazonClientException { + public AssignPrivateIpAddressesResult 
assignPrivateIpAddresses(AssignPrivateIpAddressesRequest assignPrivateIpAddressesRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AssignIpv6AddressesResult assignIpv6Addresses(AssignIpv6AddressesRequest assignIpv6AddressesRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1343,7 +1643,7 @@ public DescribeSpotFleetRequestHistoryResult describeSpotFleetRequestHistory(Des } @Override - public void deleteSnapshot(DeleteSnapshotRequest deleteSnapshotRequest) throws AmazonServiceException, AmazonClientException { + public DeleteSnapshotResult deleteSnapshot(DeleteSnapshotRequest deleteSnapshotRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1353,12 +1653,12 @@ public ReplaceNetworkAclAssociationResult replaceNetworkAclAssociation(ReplaceNe } @Override - public void disassociateAddress(DisassociateAddressRequest disassociateAddressRequest) throws AmazonServiceException, AmazonClientException { + public DisassociateAddressResult disassociateAddress(DisassociateAddressRequest disassociateAddressRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void createPlacementGroup(CreatePlacementGroupRequest createPlacementGroupRequest) throws AmazonServiceException, AmazonClientException { + public CreatePlacementGroupResult createPlacementGroup(CreatePlacementGroupRequest createPlacementGroupRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1368,17 +1668,17 @@ public BundleInstanceResult bundleInstance(BundleInstanceRequest bundleInstanceR } @Override - public void deletePlacementGroup(DeletePlacementGroupRequest 
deletePlacementGroupRequest) throws AmazonServiceException, AmazonClientException { + public DeletePlacementGroupResult deletePlacementGroup(DeletePlacementGroupRequest deletePlacementGroupRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void modifySubnetAttribute(ModifySubnetAttributeRequest modifySubnetAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ModifySubnetAttributeResult modifySubnetAttribute(ModifySubnetAttributeRequest modifySubnetAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteVpc(DeleteVpcRequest deleteVpcRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVpcResult deleteVpc(DeleteVpcRequest deleteVpcRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1398,7 +1698,7 @@ public AllocateAddressResult allocateAddress(AllocateAddressRequest allocateAddr } @Override - public void releaseAddress(ReleaseAddressRequest releaseAddressRequest) throws AmazonServiceException, AmazonClientException { + public ReleaseAddressResult releaseAddress(ReleaseAddressRequest releaseAddressRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1408,7 +1708,12 @@ public ReleaseHostsResult releaseHosts(ReleaseHostsRequest releaseHostsRequest) } @Override - public void resetInstanceAttribute(ResetInstanceAttributeRequest resetInstanceAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ReplaceIamInstanceProfileAssociationResult replaceIamInstanceProfileAssociation(ReplaceIamInstanceProfileAssociationRequest replaceIamInstanceProfileAssociationRequest) { + throw new UnsupportedOperationException("Not 
supported in mock"); + } + + @Override + public ResetInstanceAttributeResult resetInstanceAttribute(ResetInstanceAttributeRequest resetInstanceAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1423,7 +1728,7 @@ public CreateNatGatewayResult createNatGateway(CreateNatGatewayRequest createNat } @Override - public void replaceNetworkAclEntry(ReplaceNetworkAclEntryRequest replaceNetworkAclEntryRequest) throws AmazonServiceException, AmazonClientException { + public ReplaceNetworkAclEntryResult replaceNetworkAclEntry(ReplaceNetworkAclEntryRequest replaceNetworkAclEntryRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1443,7 +1748,7 @@ public RegisterImageResult registerImage(RegisterImageRequest registerImageReque } @Override - public void resetNetworkInterfaceAttribute(ResetNetworkInterfaceAttributeRequest resetNetworkInterfaceAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ResetNetworkInterfaceAttributeResult resetNetworkInterfaceAttribute(ResetNetworkInterfaceAttributeRequest resetNetworkInterfaceAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1458,7 +1763,7 @@ public EnableVpcClassicLinkDnsSupportResult enableVpcClassicLinkDnsSupport(Enabl } @Override - public void createVpnConnectionRoute(CreateVpnConnectionRouteRequest createVpnConnectionRouteRequest) throws AmazonServiceException, AmazonClientException { + public CreateVpnConnectionRouteResult createVpnConnectionRoute(CreateVpnConnectionRouteRequest createVpnConnectionRouteRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1608,7 +1913,12 @@ public DescribeBundleTasksResult describeBundleTasks() throws AmazonServiceExcep } 
@Override - public void revokeSecurityGroupIngress() throws AmazonServiceException, AmazonClientException { + public RevokeSecurityGroupIngressResult revokeSecurityGroupIngress(RevokeSecurityGroupIngressRequest revokeSecurityGroupIngressRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public RevokeSecurityGroupIngressResult revokeSecurityGroupIngress() throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1652,6 +1962,12 @@ public DescribeHostsResult describeHosts() { throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public DescribeIamInstanceProfileAssociationsResult describeIamInstanceProfileAssociations( + DescribeIamInstanceProfileAssociationsRequest describeIamInstanceProfileAssociationsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public DescribeIdFormatResult describeIdFormat(DescribeIdFormatRequest describeIdFormatRequest) { throw new UnsupportedOperationException("Not supported in mock"); @@ -1733,7 +2049,7 @@ public DescribeReservedInstancesModificationsResult describeReservedInstancesMod } @Override - public void deleteSpotDatafeedSubscription() throws AmazonServiceException, AmazonClientException { + public DeleteSpotDatafeedSubscriptionResult deleteSpotDatafeedSubscription() throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1797,6 +2113,11 @@ public void shutdown() { throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public AmazonEC2Waiters waiters() { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) { throw new 
UnsupportedOperationException("Not supported in mock"); From 3d4e28aee144e46ee0e46aec07c078366f21322d Mon Sep 17 00:00:00 2001 From: Yu Date: Tue, 12 Sep 2017 14:29:10 +0200 Subject: [PATCH 07/67] Remove index mapper dynamic settings (#25734) Remove the "index.mapper.dynamic" setting for indices created on 6.0 and later, but keep it working for indices created on 5.x and earlier. Remove two test cases that disable dynamic mapping at the index level, since the ability to disable index.mapper.dynamic has already been removed for the current version. Add a new test class for the version-dependent behavior. --- .../index/mapper/MapperService.java | 13 +- .../mapper/DynamicMappingDisabledTests.java | 138 ------------------ .../index/mapper/DynamicMappingIT.java | 30 ---- .../mapper/DynamicMappingVersionTests.java | 80 ++++++++++ .../indices/settings/GetSettingsBlocksIT.java | 5 +- 5 files changed, 95 insertions(+), 171 deletions(-) delete mode 100644 core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java create mode 100644 core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingVersionTests.java diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 3a91d0e42ad73..c9851ed7a1d05 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -148,7 +148,18 @@ public MapperService(IndexSettings indexSettings, IndexAnalyzers indexAnalyzers, this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(indexAnalyzers.getDefaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer()); this.mapperRegistry = mapperRegistry; - this.dynamic = this.indexSettings.getValue(INDEX_MAPPER_DYNAMIC_SETTING); + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1)) { + if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings())) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + throw
new IllegalArgumentException("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " was removed after version 6.0.0"); + } else { + DEPRECATION_LOGGER.deprecated("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " is deprecated since indices may not have more than one type anymore."); + } + } + this.dynamic = INDEX_MAPPER_DYNAMIC_DEFAULT; + } else { + this.dynamic = this.indexSettings.getValue(INDEX_MAPPER_DYNAMIC_SETTING); + } defaultMappingSource = "{\"_default_\":{}}"; if (logger.isTraceEnabled()) { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java deleted file mode 100644 index 686bbafbcd23a..0000000000000 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.bulk.TransportBulkAction; -import org.elasticsearch.action.bulk.TransportShardBulkAction; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.AutoCreateIndex; -import org.elasticsearch.action.update.UpdateHelper; -import org.elasticsearch.client.Requests; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.MockTcpTransport; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportService; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.util.Collections; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; -import static org.hamcrest.CoreMatchers.instanceOf; - -public class DynamicMappingDisabledTests 
extends ESSingleNodeTestCase { - - private static ThreadPool threadPool; - private ClusterService clusterService; - private TransportService transportService; - private TransportBulkAction transportBulkAction; - - @BeforeClass - public static void createThreadPool() { - threadPool = new TestThreadPool("DynamicMappingDisabledTests"); - } - - @Override - public void setUp() throws Exception { - super.setUp(); - Settings settings = Settings.builder() - .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false) - .build(); - clusterService = createClusterService(threadPool); - Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), - new NetworkService(Collections.emptyList())); - transportService = new TransportService(clusterService.getSettings(), transport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - ShardStateAction shardStateAction = new ShardStateAction(settings, clusterService, transportService, null, null, threadPool); - ActionFilters actionFilters = new ActionFilters(Collections.emptySet()); - IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings); - AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new ClusterSettings(settings, - ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), indexNameExpressionResolver); - UpdateHelper updateHelper = new UpdateHelper(settings, null); - TransportShardBulkAction shardBulkAction = new TransportShardBulkAction(settings, transportService, clusterService, - indicesService, threadPool, shardStateAction, null, updateHelper, actionFilters, indexNameExpressionResolver); - transportBulkAction = new TransportBulkAction(settings, threadPool, transportService, clusterService, - null, shardBulkAction, null, 
actionFilters, indexNameExpressionResolver, autoCreateIndex, System::currentTimeMillis); - } - - @After - public void tearDown() throws Exception { - super.tearDown(); - clusterService.close(); - transportService.close(); - } - - - @AfterClass - public static void destroyThreadPool() { - ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); - // since static must set to null to be eligible for collection - threadPool = null; - } - - public void testDynamicDisabled() { - IndexRequest request = new IndexRequest("index", "type", "1"); - request.source(Requests.INDEX_CONTENT_TYPE, "foo", 3); - BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(request); - final AtomicBoolean gotResponse = new AtomicBoolean(); - - transportBulkAction.execute(bulkRequest, new ActionListener() { - @Override - public void onResponse(BulkResponse bulkResponse) { - BulkItemResponse itemResponse = bulkResponse.getItems()[0]; - assertTrue(itemResponse.isFailed()); - assertThat(itemResponse.getFailure().getCause(), instanceOf(IndexNotFoundException.class)); - assertEquals("no such index and [index.mapper.dynamic] is [false]", itemResponse.getFailure().getCause().getMessage()); - gotResponse.set(true); - } - - @Override - public void onFailure(Exception e) { - fail("unexpected failure in bulk action, expected failed bulk item"); - } - }); - - assertTrue(gotResponse.get()); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index d183242ee19fe..4172a6172005e 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -153,34 +153,4 @@ public void run() { assertTrue(client().prepareGet("index", "type", Integer.toString(i)).get().isExists()); } } - - public void testAutoCreateWithDisabledDynamicMappings() throws Exception { - 
assertAcked(client().admin().indices().preparePutTemplate("my_template") - .setCreate(true) - .setPatterns(Collections.singletonList("index_*")) - .addMapping("foo", "field", "type=keyword") - .setSettings(Settings.builder().put("index.mapper.dynamic", false).build()) - .get()); - - // succeeds since 'foo' has an explicit mapping in the template - indexRandom(true, false, client().prepareIndex("index_1", "foo", "1").setSource("field", "abc")); - - // fails since 'bar' does not have an explicit mapping in the template and dynamic template creation is disabled - TypeMissingException e1 = expectThrows(TypeMissingException.class, - () -> client().prepareIndex("index_2", "bar", "1").setSource("field", "abc").get()); - assertEquals("type[bar] missing", e1.getMessage()); - assertEquals("trying to auto create mapping, but dynamic mapping is disabled", e1.getCause().getMessage()); - - BulkResponse bulkResponse = client().prepareBulk().add(new IndexRequest("index_2", "bar", "2").source("field", "abc")).get(); - assertTrue(bulkResponse.hasFailures()); - BulkItemResponse.Failure firstFailure = bulkResponse.getItems()[0].getFailure(); - assertThat(firstFailure.getCause(), instanceOf(TypeMissingException.class)); - assertEquals("type[bar] missing", firstFailure.getCause().getMessage()); - assertEquals("trying to auto create mapping, but dynamic mapping is disabled", firstFailure.getCause().getCause().getMessage()); - - // make sure no mappings were created for bar - GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("index_2").get(); - assertFalse(getIndexResponse.mappings().containsKey("bar")); - } - } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingVersionTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingVersionTests.java new file mode 100644 index 0000000000000..94af6c5454493 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingVersionTests.java @@ 
-0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.indices.TypeMissingException; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; + +import java.io.IOException; +import java.util.Collection; + +public class DynamicMappingVersionTests extends ESSingleNodeTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(InternalSettingsPlugin.class); + } + + public void testDynamicMappingDefault() throws IOException { + MapperService mapperService = createIndex("my-index").mapperService(); + DocumentMapper documentMapper = mapperService + .documentMapperWithAutoCreate("my-type").getDocumentMapper(); + + ParsedDocument parsedDoc = documentMapper.parse( + SourceToParse.source("my-index", "my-type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("foo", 3) + 
.endObject() + .bytes(), XContentType.JSON)); + + String expectedMapping = XContentFactory.jsonBuilder().startObject() + .startObject("my-type") + .startObject("properties") + .startObject("foo").field("type", "long") + .endObject().endObject().endObject().endObject().string(); + assertEquals(expectedMapping, parsedDoc.dynamicMappingsUpdate().toString()); + } + + public void testDynamicMappingSettingRemoval() { + Settings settings = Settings.builder() + .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false) + .build(); + Exception e = expectThrows(IllegalArgumentException.class, () -> createIndex("test-index", settings)); + assertEquals(e.getMessage(), "Setting index.mapper.dynamic was removed after version 6.0.0"); + } + + public void testDynamicMappingDisablePreEs6() { + Settings settingsPreEs6 = Settings.builder() + .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) + .build(); + MapperService preEs6MapperService = createIndex("pre-es6-index", settingsPreEs6).mapperService(); + Exception e = expectThrows(TypeMissingException.class, + () -> preEs6MapperService.documentMapperWithAutoCreate("pre-es6-type")); + assertEquals(e.getMessage(), "type[pre-es6-type] missing"); + } +} diff --git a/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksIT.java b/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksIT.java index cb45a639c07eb..3d9b2aab7ad16 100644 --- a/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksIT.java +++ b/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.test.ESIntegTestCase; @@ -42,7 +43,7 @@ public void 
testGetSettingsWithBlocks() throws Exception { .setSettings(Settings.builder() .put("index.refresh_interval", -1) .put("index.merge.policy.expunge_deletes_allowed", "30") - .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false))); + .put(FieldMapper.IGNORE_MALFORMED_SETTING.getKey(), false))); for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_READ_ONLY_ALLOW_DELETE)) { try { @@ -51,7 +52,7 @@ public void testGetSettingsWithBlocks() throws Exception { assertThat(response.getIndexToSettings().size(), greaterThanOrEqualTo(1)); assertThat(response.getSetting("test", "index.refresh_interval"), equalTo("-1")); assertThat(response.getSetting("test", "index.merge.policy.expunge_deletes_allowed"), equalTo("30")); - assertThat(response.getSetting("test", MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey()), equalTo("false")); + assertThat(response.getSetting("test", FieldMapper.IGNORE_MALFORMED_SETTING.getKey()), equalTo("false")); } finally { disableIndexBlock("test", block); } From 42f3129d7bfd068fb705152c99b28d3d8e2f1cce Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 12 Sep 2017 15:32:33 +0200 Subject: [PATCH 08/67] Allow plugins to validate cluster-state on join (#26595) Today we don't have a pluggable way to validate whether the cluster state is compatible with the node that joins. We already apply some checks for index compatibility that prevent nodes from joining a cluster with indices they don't support, but for plugins this isn't possible. This change adds a cluster state validator that allows plugins to prevent a join if the cluster-state is incompatible.
--- .../discovery/DiscoveryModule.java | 13 ++++++-- .../discovery/zen/MembershipAction.java | 22 ++++++++++--- .../discovery/zen/ZenDiscovery.java | 33 +++++++++++++++---- .../plugins/DiscoveryPlugin.java | 11 +++++++ .../discovery/DiscoveryModuleTests.java | 22 ++++++++++++- .../discovery/zen/ZenDiscoveryUnitTests.java | 8 +++-- .../test/discovery/TestZenDiscovery.java | 2 +- 7 files changed, 94 insertions(+), 17 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index 0ecf40e65a1ba..179692cd516c8 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; @@ -36,12 +38,15 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.Supplier; @@ -62,7 +67,7 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic ClusterApplier clusterApplier, ClusterSettings clusterSettings, List plugins, AllocationService allocationService) { final UnicastHostsProvider hostsProvider; - + final Collection> joinValidators = new ArrayList<>(); Map> hostProviders = new HashMap<>(); for (DiscoveryPlugin plugin : plugins) { plugin.getZenHostsProviders(transportService, 
networkService).entrySet().forEach(entry -> { @@ -70,6 +75,10 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic throw new IllegalArgumentException("Cannot register zen hosts provider [" + entry.getKey() + "] twice"); } }); + BiConsumer joinValidator = plugin.getJoinValidator(); + if (joinValidator != null) { + joinValidators.add(joinValidator); + } } Optional hostsProviderName = DISCOVERY_HOSTS_PROVIDER_SETTING.get(settings); if (hostsProviderName.isPresent()) { @@ -85,7 +94,7 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic Map> discoveryTypes = new HashMap<>(); discoveryTypes.put("zen", () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, - clusterSettings, hostsProvider, allocationService)); + clusterSettings, hostsProvider, allocationService, Collections.unmodifiableCollection(joinValidators))); discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, transportService, masterService, clusterApplier)); for (DiscoveryPlugin plugin : plugins) { plugin.getDiscoveryTypes(threadPool, transportService, namedWriteableRegistry, diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java index 18cac5818049f..fdfcd8ac29079 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java @@ -39,7 +39,10 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.Collection; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import java.util.function.Supplier; public class MembershipAction extends AbstractComponent { @@ -63,7 +66,8 @@ public interface MembershipListener { private final MembershipListener listener; - public MembershipAction(Settings settings, 
TransportService transportService, MembershipListener listener) { + public MembershipAction(Settings settings, TransportService transportService, MembershipListener listener, + Collection> joinValidators) { super(settings); this.transportService = transportService; this.listener = listener; @@ -73,7 +77,7 @@ public MembershipAction(Settings settings, TransportService transportService, Me ThreadPool.Names.GENERIC, new JoinRequestRequestHandler()); transportService.registerRequestHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME, () -> new ValidateJoinRequest(), ThreadPool.Names.GENERIC, - new ValidateJoinRequestRequestHandler()); + new ValidateJoinRequestRequestHandler(transportService::getLocalNode, joinValidators)); transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest::new, ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler()); } @@ -176,12 +180,20 @@ public void writeTo(StreamOutput out) throws IOException { } static class ValidateJoinRequestRequestHandler implements TransportRequestHandler { + private final Supplier localNodeSupplier; + private final Collection> joinValidators; + + ValidateJoinRequestRequestHandler(Supplier localNodeSupplier, + Collection> joinValidators) { + this.localNodeSupplier = localNodeSupplier; + this.joinValidators = joinValidators; + } @Override public void messageReceived(ValidateJoinRequest request, TransportChannel channel) throws Exception { - ensureNodesCompatibility(Version.CURRENT, request.state.getNodes()); - ensureIndexCompatibility(Version.CURRENT, request.state.getMetaData()); - // for now, the mere fact that we can serialize the cluster state acts as validation.... 
+ DiscoveryNode node = localNodeSupplier.get(); + assert node != null : "local node is null"; + joinValidators.stream().forEach(action -> action.accept(node, request.state)); channel.sendResponse(TransportResponse.Empty.INSTANCE); } } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index a4817fada36d2..249cce73765be 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -69,6 +69,8 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Set; @@ -78,6 +80,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.stream.Collectors; @@ -146,15 +149,17 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private final NodeJoinController nodeJoinController; private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; - private final ClusterApplier clusterApplier; private final AtomicReference committedState; // last committed cluster state private final Object stateMutex = new Object(); + private final Collection> onJoinValidators; public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, MasterService masterService, ClusterApplier clusterApplier, - ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, AllocationService allocationService) { + ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, AllocationService allocationService, + Collection> onJoinValidators) { super(settings); + 
this.onJoinValidators = addBuiltInJoinValidators(onJoinValidators); this.masterService = masterService; this.clusterApplier = clusterApplier; this.transportService = transportService; @@ -211,7 +216,7 @@ public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService t namedWriteableRegistry, this, discoverySettings); - this.membership = new MembershipAction(settings, transportService, new MembershipListener()); + this.membership = new MembershipAction(settings, transportService, new MembershipListener(), onJoinValidators); this.joinThreadControl = new JoinThreadControl(); this.nodeJoinController = new NodeJoinController(masterService, allocationService, electMaster, settings); @@ -223,6 +228,17 @@ public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService t DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest::new, ThreadPool.Names.SAME, new RejoinClusterRequestHandler()); } + static Collection> addBuiltInJoinValidators( + Collection> onJoinValidators) { + Collection> validators = new ArrayList<>(); + validators.add((node, state) -> { + MembershipAction.ensureNodesCompatibility(node.getVersion(), state.getNodes()); + MembershipAction.ensureIndexCompatibility(node.getVersion(), state.getMetaData()); + }); + validators.addAll(onJoinValidators); + return Collections.unmodifiableCollection(validators); + } + // protected to allow overriding in tests protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, UnicastHostsProvider hostsProvider) { @@ -885,8 +901,7 @@ void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final } else { // we do this in a couple of places including the cluster update thread. This one here is really just best effort // to ensure we fail as fast as possible. 
- MembershipAction.ensureNodesCompatibility(node.getVersion(), state.getNodes()); - MembershipAction.ensureIndexCompatibility(node.getVersion(), state.getMetaData()); + onJoinValidators.stream().forEach(a -> a.accept(node, state)); if (state.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) { MembershipAction.ensureMajorVersionBarrier(node.getVersion(), state.getNodes().getMinNodeVersion()); } @@ -898,7 +913,8 @@ void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final try { membership.sendValidateJoinRequestBlocking(node, state, joinTimeout); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), e); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), + e); callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e)); return; } @@ -1313,4 +1329,9 @@ public void start() { } } + + public final Collection> getOnJoinValidators() { + return onJoinValidators; + } + } diff --git a/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java b/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java index c3af5593cd7c4..912bcdc9d852a 100644 --- a/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java @@ -19,10 +19,14 @@ package org.elasticsearch.plugins; +import java.util.Collection; import java.util.Collections; import java.util.Map; +import java.util.function.BiConsumer; import java.util.function.Supplier; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; @@ -106,4 +110,11 @@ default Map> 
getZenHostsProviders(Transpo NetworkService networkService) { return Collections.emptyMap(); } + + /** + * Returns a consumer that validates the initial join cluster state. The validator, unless null, is called exactly once per + * join attempt but might be called multiple times during the lifetime of a node. Validators are expected to throw an + * {@link IllegalStateException} if the node and the cluster-state are incompatible. + */ + default BiConsumer getJoinValidator() { return null; } } diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index 39a9dbff959c6..8c2d84cd8c89d 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -20,6 +20,8 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; @@ -40,10 +42,12 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; import java.util.function.Supplier; import static org.mockito.Mockito.mock; @@ -160,7 +164,23 @@ public void testDuplicateHostsProvider() { public void testLazyConstructionHostsProvider() { DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("custom", - () -> { throw new AssertionError("created hosts provider which was not selected"); }); + () -> { + throw new AssertionError("created hosts provider which was not selected"); + }); newModule(Settings.EMPTY, 
Collections.singletonList(plugin)); } + + public void testJoinValidator() { + BiConsumer consumer = (a, b) -> {}; + DiscoveryModule module = newModule(Settings.EMPTY, Collections.singletonList(new DiscoveryPlugin() { + @Override + public BiConsumer getJoinValidator() { + return consumer; + } + })); + ZenDiscovery discovery = (ZenDiscovery) module.getDiscovery(); + Collection> onJoinValidators = discovery.getOnJoinValidators(); + assertEquals(2, onJoinValidators.size()); + assertTrue(onJoinValidators.contains(consumer)); + } } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index bc653e14e3275..b0dc783349ca8 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -320,7 +320,8 @@ public void onNewClusterState(String source, Supplier clusterState } }; ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), - masterService, clusterApplier, clusterSettings, Collections::emptyList, ESAllocationTestCase.createAllocationService()); + masterService, clusterApplier, clusterSettings, Collections::emptyList, ESAllocationTestCase.createAllocationService(), + Collections.emptyList()); zenDiscovery.start(); return zenDiscovery; } @@ -342,7 +343,10 @@ public void testValidateOnUnsupportedIndexVersionCreated() throws Exception { ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.DEFAULT); final DiscoveryNode otherNode = new DiscoveryNode("other_node", buildNewFakeTransportAddress(), emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT); - MembershipAction.ValidateJoinRequestRequestHandler request = new MembershipAction.ValidateJoinRequestRequestHandler(); + final DiscoveryNode localNode = new DiscoveryNode("other_node", 
buildNewFakeTransportAddress(), emptyMap(), + EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT); + MembershipAction.ValidateJoinRequestRequestHandler request = new MembershipAction.ValidateJoinRequestRequestHandler + (() -> localNode, ZenDiscovery.addBuiltInJoinValidators(Collections.emptyList())); final boolean incompatible = randomBoolean(); IndexMetaData indexMetaData = IndexMetaData.builder("test").settings(Settings.builder() .put(SETTING_VERSION_CREATED, incompatible ? VersionUtils.getPreviousVersion(Version.CURRENT.minimumIndexCompatibilityVersion()) diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java index 63212cddc39b1..d224d9c519c8a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java @@ -83,7 +83,7 @@ private TestZenDiscovery(Settings settings, ThreadPool threadPool, TransportServ ClusterApplier clusterApplier, ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, AllocationService allocationService) { super(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, clusterSettings, - hostsProvider, allocationService); + hostsProvider, allocationService, Collections.emptyList()); } @Override From 0e57a416f1fc3e06bf7596aaf56aadef54e39c8d Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Mon, 11 Sep 2017 16:30:38 -0500 Subject: [PATCH 09/67] Handle the 5.6.0 release --- core/src/main/java/org/elasticsearch/Version.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 935278044c483..441b59def4cc2 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -92,6 
+92,8 @@ public class Version implements Comparable { public static final Version V_5_5_3 = new Version(V_5_5_3_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); public static final int V_5_6_0_ID = 5060099; public static final Version V_5_6_0 = new Version(V_5_6_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); + public static final int V_5_6_1_ID = 5060199; + public static final Version V_5_6_1 = new Version(V_5_6_1_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); @@ -142,6 +144,8 @@ public static Version fromId(int id) { return V_6_0_0_alpha2; case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; + case V_5_6_1_ID: + return V_5_6_1; case V_5_6_0_ID: return V_5_6_0; case V_5_5_3_ID: From b01b1c2a58150076c48413a4fb857e6a01fe23dc Mon Sep 17 00:00:00 2001 From: David Pilato Date: Tue, 12 Sep 2017 16:51:44 +0200 Subject: [PATCH 10/67] Remove azure deprecated settings (#26099) Follow up for #23405. We remove azure deprecated settings in 7.0: * The legacy azure settings which where starting with `cloud.azure.storage.` prefix have been removed. This includes `account`, `key`, `default` and `timeout`. You need to use settings which are starting with `azure.client.` prefix instead. * Global timeout setting `cloud.azure.storage.timeout` has been removed. You must set it per azure client instead. Like `azure.client.default.timeout: 10s` for example. 
--- docs/plugins/repository-azure.asciidoc | 2 +- docs/reference/migration/migrate_7_0.asciidoc | 2 + .../migration/migrate_7_0/plugins.asciidoc | 14 ++ plugins/repository-azure/build.gradle | 2 - .../azure/storage/AzureStorageService.java | 20 --- .../storage/AzureStorageServiceImpl.java | 63 ++------ .../azure/storage/AzureStorageSettings.java | 123 +-------------- .../azure/AzureRepositoryPlugin.java | 7 - .../storage/AzureStorageServiceTests.java | 121 +-------------- .../AzureStorageSettingsFilterTests.java | 72 --------- .../azure/AzureSettingsParserTests.java | 143 ------------------ 11 files changed, 32 insertions(+), 537 deletions(-) create mode 100644 docs/reference/migration/migrate_7_0/plugins.asciidoc delete mode 100644 plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java delete mode 100644 plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSettingsParserTests.java diff --git a/docs/plugins/repository-azure.asciidoc b/docs/plugins/repository-azure.asciidoc index a7b4dbaa7fbdf..3fc4a5a0cad21 100644 --- a/docs/plugins/repository-azure.asciidoc +++ b/docs/plugins/repository-azure.asciidoc @@ -46,7 +46,7 @@ before retrying after a first timeout or failure. The maximum backoff period is [source,yaml] ---- -cloud.azure.storage.timeout: 10s +azure.client.default.timeout: 10s azure.client.default.max_retries: 7 azure.client.secondary.timeout: 30s ---- diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc index 6d01401754fd5..043d62465be39 100644 --- a/docs/reference/migration/migrate_7_0.asciidoc +++ b/docs/reference/migration/migrate_7_0.asciidoc @@ -29,9 +29,11 @@ way to reindex old indices is to use the `reindex` API. 
* <> * <> * <> +* <> include::migrate_7_0/aggregations.asciidoc[] include::migrate_7_0/cluster.asciidoc[] include::migrate_7_0/indices.asciidoc[] include::migrate_7_0/mappings.asciidoc[] include::migrate_7_0/search.asciidoc[] +include::migrate_7_0/plugins.asciidoc[] diff --git a/docs/reference/migration/migrate_7_0/plugins.asciidoc b/docs/reference/migration/migrate_7_0/plugins.asciidoc new file mode 100644 index 0000000000000..6bc9edec0dabc --- /dev/null +++ b/docs/reference/migration/migrate_7_0/plugins.asciidoc @@ -0,0 +1,14 @@ +[[breaking_70_plugins_changes]] +=== Plugins changes + +==== Azure Repository plugin + +* The legacy azure settings which were starting with `cloud.azure.storage.` prefix have been removed. +This includes `account`, `key`, `default` and `timeout`. +You need to use settings which are starting with `azure.client.` prefix instead. + +* Global timeout setting `cloud.azure.storage.timeout` has been removed. +You must set it per azure client instead. Like `azure.client.default.timeout: 10s` for example. + +See {plugins}/repository-azure-usage.html#repository-azure-repository-settings[Azure Repository settings].
+ diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 3264b512b2de9..632fa56e1e9da 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -43,8 +43,6 @@ thirdPartyAudit.excludes = [ ] integTestCluster { - setting 'cloud.azure.storage.my_account_test.account', 'cloudazureresource' - setting 'cloud.azure.storage.my_account_test.key', 'abcdefgh' keystoreSetting 'azure.client.default.account', 'cloudazureresource' keystoreSetting 'azure.client.default.key', 'abcdefgh' keystoreSetting 'azure.client.secondary.account', 'cloudazureresource' diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java index 79455a78c005c..778fe44f15a3c 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java @@ -22,12 +22,8 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; import org.elasticsearch.common.blobstore.BlobMetaData; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; import java.io.InputStream; @@ -44,22 +40,6 @@ public interface AzureStorageService { ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES); ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB); - final class Storage { - @Deprecated - public static final String PREFIX = "cloud.azure.storage."; - - @Deprecated - public static final 
Setting STORAGE_ACCOUNTS = Setting.groupSetting(Storage.PREFIX, Setting.Property.NodeScope); - - /** - * Azure timeout (defaults to -1 minute) - * @deprecated We don't want to support global timeout settings anymore - */ - @Deprecated - static final Setting TIMEOUT_SETTING = - Setting.timeSetting("cloud.azure.storage.timeout", TimeValue.timeValueMinutes(-1), Property.NodeScope, Property.Deprecated); - } - boolean doesContainerExist(String account, LocationMode mode, String container); void removeContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java index 8268cba7f3e7f..cefbbf8c606ee 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java @@ -33,11 +33,9 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cloud.azure.blobstore.util.SocketAccess; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoryException; @@ -53,40 +51,19 @@ public class AzureStorageServiceImpl extends AbstractComponent implements AzureStorageService { final Map storageSettings; - final Map deprecatedStorageSettings; - final Map clients; + final Map clients = new HashMap<>(); - public 
AzureStorageServiceImpl(Settings settings, Map regularStorageSettings) { + public AzureStorageServiceImpl(Settings settings, Map storageSettings) { super(settings); - if (regularStorageSettings.isEmpty()) { - this.storageSettings = new HashMap<>(); - // We have deprecated settings so we need to migrate them to the new implementation - Tuple> storageSettingsMapTuple = AzureStorageSettings.loadLegacy(settings); - deprecatedStorageSettings = storageSettingsMapTuple.v2(); - if (storageSettingsMapTuple.v1() != null) { - if (storageSettingsMapTuple.v1().getName().equals("default") == false) { - // We add the primary configuration to the list of all settings with its deprecated name in case someone is - // forcing a specific configuration name when creating the repository instance - deprecatedStorageSettings.put(storageSettingsMapTuple.v1().getName(), storageSettingsMapTuple.v1()); - } - // We add the primary configuration to the list of all settings as the "default" one - deprecatedStorageSettings.put("default", storageSettingsMapTuple.v1()); - } else { - // If someone did not register any settings or deprecated settings, they - // basically can't use the plugin - throw new IllegalArgumentException("If you want to use an azure repository, you need to define a client configuration."); - } - + this.storageSettings = storageSettings; - } else { - this.storageSettings = regularStorageSettings; - this.deprecatedStorageSettings = new HashMap<>(); + if (storageSettings.isEmpty()) { + // If someone did not register any settings, they basically can't use the plugin + throw new IllegalArgumentException("If you want to use an azure repository, you need to define a client configuration."); } - this.clients = new HashMap<>(); - logger.debug("starting azure storage client instance"); // We register all regular azure clients @@ -94,12 +71,6 @@ public AzureStorageServiceImpl(Settings settings, Map azureStorageSettingsEntry : this.deprecatedStorageSettings.entrySet()) { - 
logger.debug("registering deprecated client for account [{}]", azureStorageSettingsEntry.getKey()); - createClient(azureStorageSettingsEntry.getValue()); - } } void createClient(AzureStorageSettings azureStorageSettings) { @@ -125,31 +96,21 @@ void createClient(AzureStorageSettings azureStorageSettings) { } } - CloudBlobClient getSelectedClient(String account, LocationMode mode) { - logger.trace("selecting a client for account [{}], mode [{}]", account, mode.name()); - AzureStorageSettings azureStorageSettings = this.storageSettings.get(account); + CloudBlobClient getSelectedClient(String clientName, LocationMode mode) { + logger.trace("selecting a client named [{}], mode [{}]", clientName, mode.name()); + AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName); if (azureStorageSettings == null) { - // We can't find a client that has been registered using regular settings so we try deprecated client - azureStorageSettings = this.deprecatedStorageSettings.get(account); - if (azureStorageSettings == null) { - // We did not get an account. That's bad. - if (Strings.hasLength(account)) { - throw new IllegalArgumentException("Can not find named azure client [" + account + - "]. Check your elasticsearch.yml."); - } - throw new IllegalArgumentException("Can not find primary/secondary client using deprecated settings. " + - "Check your elasticsearch.yml."); - } + throw new IllegalArgumentException("Can not find named azure client [" + clientName + "]. 
Check your settings."); } CloudBlobClient client = this.clients.get(azureStorageSettings.getAccount()); if (client == null) { - throw new IllegalArgumentException("Can not find an azure client for account [" + azureStorageSettings.getAccount() + "]"); + throw new IllegalArgumentException("Can not find an azure client named [" + azureStorageSettings.getAccount() + "]"); } // NOTE: for now, just set the location mode in case it is different; - // only one mode per storage account can be active at a time + // only one mode per storage clientName can be active at a time client.getDefaultRequestOptions().setLocationMode(mode); // Set timeout option if the user sets cloud.azure.storage.timeout or cloud.azure.storage.xxx.timeout (it's negative by default) diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java index 5478ba60e0ea5..b33822eee61f0 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java @@ -20,26 +20,19 @@ package org.elasticsearch.cloud.azure.storage; import com.microsoft.azure.storage.RetryPolicy; -import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.AffixSetting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.TimeValue; -import java.util.ArrayList; import java.util.Collections; import 
java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Set; -import static org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage.STORAGE_ACCOUNTS; - public final class AzureStorageSettings { // prefix for azure client settings private static final String PREFIX = "azure.client."; @@ -64,56 +57,20 @@ public final class AzureStorageSettings { key -> SecureSetting.secureString(key, null)); public static final AffixSetting TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "timeout", - (key) -> Setting.timeSetting(key, Storage.TIMEOUT_SETTING, Property.NodeScope)); - - - @Deprecated - public static final Setting DEPRECATED_TIMEOUT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "timeout", - (key) -> Setting.timeSetting(key, Storage.TIMEOUT_SETTING, Property.NodeScope, Property.Deprecated)); - @Deprecated - public static final Setting DEPRECATED_ACCOUNT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "account", - (key) -> Setting.simpleString(key, Property.NodeScope, Property.Deprecated)); - @Deprecated - public static final Setting DEPRECATED_KEY_SETTING = Setting.affixKeySetting(Storage.PREFIX, "key", - (key) -> Setting.simpleString(key, Property.NodeScope, Property.Deprecated)); - @Deprecated - public static final Setting DEPRECATED_DEFAULT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "default", - (key) -> Setting.boolSetting(key, false, Property.NodeScope, Property.Deprecated)); - + (key) -> Setting.timeSetting(key, TimeValue.timeValueMinutes(-1), Property.NodeScope)); - @Deprecated - private final String name; private final String account; private final String key; private final TimeValue timeout; - @Deprecated - private final boolean activeByDefault; private final int maxRetries; public AzureStorageSettings(String account, String key, TimeValue timeout, int maxRetries) { - this.name = null; - this.account = account; - this.key = key; - this.timeout = timeout; - this.activeByDefault = false; - this.maxRetries = 
maxRetries; - } - - @Deprecated - public AzureStorageSettings(String name, String account, String key, TimeValue timeout, boolean activeByDefault, int maxRetries) { - this.name = name; this.account = account; this.key = key; this.timeout = timeout; - this.activeByDefault = activeByDefault; this.maxRetries = maxRetries; } - @Deprecated - public String getName() { - return name; - } - public String getKey() { return key; } @@ -126,11 +83,6 @@ public TimeValue getTimeout() { return timeout; } - @Deprecated - public Boolean isActiveByDefault() { - return activeByDefault; - } - public int getMaxRetries() { return maxRetries; } @@ -138,27 +90,14 @@ public int getMaxRetries() { @Override public String toString() { final StringBuilder sb = new StringBuilder("AzureStorageSettings{"); - sb.append("name='").append(name).append('\''); sb.append(", account='").append(account).append('\''); sb.append(", key='").append(key).append('\''); - sb.append(", activeByDefault='").append(activeByDefault).append('\''); sb.append(", timeout=").append(timeout); sb.append(", maxRetries=").append(maxRetries); sb.append('}'); return sb.toString(); } - /** - * Parses settings and read all legacy settings available under cloud.azure.storage.* - * @param settings settings to parse - * @return A tuple with v1 = primary storage and v2 = secondary storage - */ - @Deprecated - public static Tuple> loadLegacy(Settings settings) { - List storageSettings = createStorageSettingsDeprecated(settings); - return Tuple.tuple(getPrimary(storageSettings), getSecondaries(storageSettings)); - } - /** * Parses settings and read all settings available under azure.client.* * @param settings settings to parse @@ -192,25 +131,6 @@ static AzureStorageSettings getClientSettings(Settings settings, String clientNa } } - @Deprecated - private static List createStorageSettingsDeprecated(Settings settings) { - // ignore global timeout which has the same prefix but does not belong to any group - Settings groups = 
STORAGE_ACCOUNTS.get(settings.filter((k) -> k.equals(Storage.TIMEOUT_SETTING.getKey()) == false)); - List storageSettings = new ArrayList<>(); - for (String groupName : groups.getAsGroups().keySet()) { - storageSettings.add( - new AzureStorageSettings( - groupName, - getValue(settings, groupName, DEPRECATED_ACCOUNT_SETTING), - getValue(settings, groupName, DEPRECATED_KEY_SETTING), - getValue(settings, groupName, DEPRECATED_TIMEOUT_SETTING), - getValue(settings, groupName, DEPRECATED_DEFAULT_SETTING), - getValue(settings, groupName, MAX_RETRIES_SETTING)) - ); - } - return storageSettings; - } - private static T getConfigValue(Settings settings, String clientName, Setting.AffixSetting clientSetting) { Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); @@ -222,45 +142,4 @@ public static T getValue(Settings settings, String groupName, Setting set String fullKey = k.toConcreteKey(groupName).toString(); return setting.getConcreteSetting(fullKey).get(settings); } - - @Deprecated - private static AzureStorageSettings getPrimary(List settings) { - if (settings.isEmpty()) { - return null; - } else if (settings.size() == 1) { - // the only storage settings belong (implicitly) to the default primary storage - AzureStorageSettings storage = settings.get(0); - return new AzureStorageSettings(storage.getName(), storage.getAccount(), storage.getKey(), storage.getTimeout(), true, - storage.getMaxRetries()); - } else { - AzureStorageSettings primary = null; - for (AzureStorageSettings setting : settings) { - if (setting.isActiveByDefault()) { - if (primary == null) { - primary = setting; - } else { - throw new SettingsException("Multiple default Azure data stores configured: [" + primary.getName() + "] and [" + setting.getName() + "]"); - } - } - } - if (primary == null) { - throw new SettingsException("No default Azure data store configured"); - } - return primary; - } - } - - @Deprecated - private static Map getSecondaries(List settings) { - Map 
secondaries = new HashMap<>(); - // when only one setting is defined, we don't have secondaries - if (settings.size() > 1) { - for (AzureStorageSettings setting : settings) { - if (setting.isActiveByDefault() == false) { - secondaries.put(setting.getName(), setting); - } - } - } - return secondaries; - } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java index b90d44264ecf7..2816aa963fa59 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java @@ -62,16 +62,9 @@ public Map getRepositories(Environment env, NamedXCo @Override public List> getSettings() { return Arrays.asList( - AzureStorageService.Storage.STORAGE_ACCOUNTS, AzureStorageSettings.ACCOUNT_SETTING, AzureStorageSettings.KEY_SETTING, AzureStorageSettings.TIMEOUT_SETTING ); } - - @Override - public List getSettingsFilter() { - // Cloud storage API settings using a pattern needed to be hidden - return Arrays.asList(AzureStorageService.Storage.PREFIX + "*.account", AzureStorageService.Storage.PREFIX + "*.key"); - } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java index b232ee12e05c4..68c2186d9855e 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java @@ -23,7 +23,6 @@ import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.blob.CloudBlobClient; import 
org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; @@ -32,11 +31,6 @@ import java.util.Map; import static org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl.blobNameFromUri; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_ACCOUNT_SETTING; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_DEFAULT_SETTING; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_KEY_SETTING; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_TIMEOUT_SETTING; -import static org.elasticsearch.repositories.azure.AzureSettingsParserTests.getConcreteSetting; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -45,18 +39,6 @@ public class AzureStorageServiceTests extends ESTestCase { - @Deprecated - static final Settings deprecatedSettings = Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .put("cloud.azure.storage.azure1.default", true) - .put("cloud.azure.storage.azure2.account", "myaccount2") - .put("cloud.azure.storage.azure2.key", "mykey2") - .put("cloud.azure.storage.azure3.account", "myaccount3") - .put("cloud.azure.storage.azure3.key", "mykey3") - .put("cloud.azure.storage.azure3.timeout", "30s") - .build(); - private MockSecureSettings buildSecureSettings() { MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("azure.client.azure1.account", "myaccount1"); @@ -102,23 +84,7 @@ public void testGetSelectedClientNonExisting() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { azureStorageService.getSelectedClient("azure4", LocationMode.PRIMARY_ONLY); }); - 
assertThat(e.getMessage(), is("Can not find named azure client [azure4]. Check your elasticsearch.yml.")); - } - - public void testGetSelectedClientGlobalTimeout() { - Settings timeoutSettings = Settings.builder() - .setSecureSettings(buildSecureSettings()) - .put(AzureStorageService.Storage.TIMEOUT_SETTING.getKey(), "10s") - .put("azure.client.azure3.timeout", "30s") - .build(); - - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(timeoutSettings); - CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); - assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(10 * 1000)); - CloudBlobClient client3 = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY); - assertThat(client3.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(30 * 1000)); - - assertSettingDeprecationsAndWarnings(new Setting[]{AzureStorageService.Storage.TIMEOUT_SETTING}); + assertThat(e.getMessage(), is("Can not find named azure client [azure4]. Check your settings.")); } public void testGetSelectedClientDefaultTimeout() { @@ -170,7 +136,7 @@ class AzureStorageServiceMock extends AzureStorageServiceImpl { @Override void createClient(AzureStorageSettings azureStorageSettings) { this.clients.put(azureStorageSettings.getAccount(), - new CloudBlobClient(URI.create("https://" + azureStorageSettings.getName()))); + new CloudBlobClient(URI.create("https://" + azureStorageSettings.getAccount()))); } } @@ -184,87 +150,4 @@ public void testBlobNameFromUri() throws URISyntaxException { name = blobNameFromUri(new URI("https://127.0.0.1/container/path/to/myfile")); assertThat(name, is("path/to/myfile")); } - - // Deprecated settings. 
We still test them until we remove definitely the deprecated settings - - @Deprecated - public void testGetSelectedClientWithNoSecondary() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .build()); - CloudBlobClient client = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); - assertThat(client.getEndpoint(), is(URI.create("https://azure1"))); - assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1") - }); - } - - @Deprecated - public void testGetDefaultClientWithNoSecondary() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .build()); - CloudBlobClient client = azureStorageService.getSelectedClient("default", LocationMode.PRIMARY_ONLY); - assertThat(client.getEndpoint(), is(URI.create("https://azure1"))); - assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1") - }); - } - - @Deprecated - public void testGetSelectedClientPrimary() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(deprecatedSettings); - CloudBlobClient client = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); - assertThat(client.getEndpoint(), is(URI.create("https://azure1"))); - assertDeprecatedWarnings(); - } - - @Deprecated - public void testGetSelectedClientSecondary1() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(deprecatedSettings); - CloudBlobClient client = azureStorageService.getSelectedClient("azure2", LocationMode.PRIMARY_ONLY); - 
assertThat(client.getEndpoint(), is(URI.create("https://azure2"))); - assertDeprecatedWarnings(); - } - - @Deprecated - public void testGetSelectedClientSecondary2() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(deprecatedSettings); - CloudBlobClient client = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY); - assertThat(client.getEndpoint(), is(URI.create("https://azure3"))); - assertDeprecatedWarnings(); - } - - @Deprecated - public void testGetDefaultClientWithPrimaryAndSecondaries() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(deprecatedSettings); - CloudBlobClient client = azureStorageService.getSelectedClient("default", LocationMode.PRIMARY_ONLY); - assertThat(client.getEndpoint(), is(URI.create("https://azure1"))); - assertDeprecatedWarnings(); - } - - @Deprecated - public void testGetSelectedClientDefault() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(deprecatedSettings); - CloudBlobClient client = azureStorageService.getSelectedClient("default", LocationMode.PRIMARY_ONLY); - assertThat(client.getEndpoint(), is(URI.create("https://azure1"))); - assertDeprecatedWarnings(); - } - - private void assertDeprecatedWarnings() { - assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_DEFAULT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure2"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure2"), - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure3"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure3"), - getConcreteSetting(DEPRECATED_TIMEOUT_SETTING, "azure3") - }); - } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java 
b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java deleted file mode 100644 index 17b43715253c8..0000000000000 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.cloud.azure.storage; - -import org.elasticsearch.common.inject.ModuleTestCase; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.settings.SettingsModule; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.plugin.repository.azure.AzureRepositoryPlugin; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.rest.FakeRestRequest; - -import java.io.IOException; - -import static org.hamcrest.Matchers.contains; - -/** - * TODO as we moved credentials to secure settings, we don't need anymore to keep this test in 7.x - */ -public class AzureStorageSettingsFilterTests extends ESTestCase { - static final Settings settings = Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .put("cloud.azure.storage.azure1.default", true) - .put("cloud.azure.storage.azure2.account", "myaccount2") - .put("cloud.azure.storage.azure2.key", "mykey2") - .put("cloud.azure.storage.azure3.account", "myaccount3") - .put("cloud.azure.storage.azure3.key", "mykey3") - .build(); - - public void testSettingsFiltering() throws IOException { - AzureRepositoryPlugin p = new AzureRepositoryPlugin(settings); - SettingsModule module = new SettingsModule(Settings.EMPTY, p.getSettings(), p.getSettingsFilter()); - SettingsFilter settingsFilter = ModuleTestCase.bindAndGetInstance(module, SettingsFilter.class); - - // Test using direct filtering - Settings filteredSettings = settingsFilter.filter(settings); - assertThat(filteredSettings.getAsMap().keySet(), contains("cloud.azure.storage.azure1.default")); - - // Test using toXContent filtering - RestRequest request = new FakeRestRequest(); - settingsFilter.addFilterSettingParams(request); - XContentBuilder 
xContentBuilder = XContentBuilder.builder(JsonXContent.jsonXContent); - xContentBuilder.startObject(); - settings.toXContent(xContentBuilder, request); - xContentBuilder.endObject(); - String filteredSettingsString = xContentBuilder.string(); - filteredSettings = Settings.builder().loadFromSource(filteredSettingsString, xContentBuilder.contentType()).build(); - assertThat(filteredSettings.getAsMap().keySet(), contains("cloud.azure.storage.azure1.default")); - } - -} diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSettingsParserTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSettingsParserTests.java deleted file mode 100644 index d0fbdb98e0315..0000000000000 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSettingsParserTests.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.repositories.azure; - -import org.elasticsearch.cloud.azure.storage.AzureStorageSettings; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.test.ESTestCase; - -import java.util.Map; - -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_ACCOUNT_SETTING; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_DEFAULT_SETTING; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_KEY_SETTING; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -public class AzureSettingsParserTests extends ESTestCase { - - public void testParseTwoSettingsExplicitDefault() { - Settings settings = Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .put("cloud.azure.storage.azure1.default", true) - .put("cloud.azure.storage.azure2.account", "myaccount2") - .put("cloud.azure.storage.azure2.key", "mykey2") - .build(); - - Tuple> tuple = AzureStorageSettings.loadLegacy(settings); - assertThat(tuple.v1(), notNullValue()); - assertThat(tuple.v1().getAccount(), is("myaccount1")); - assertThat(tuple.v1().getKey(), is("mykey1")); - assertThat(tuple.v2().keySet(), hasSize(1)); - assertThat(tuple.v2().get("azure2"), notNullValue()); - assertThat(tuple.v2().get("azure2").getAccount(), is("myaccount2")); - assertThat(tuple.v2().get("azure2").getKey(), is("mykey2")); - assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1"), - 
getConcreteSetting(DEPRECATED_DEFAULT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure2"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure2") - }); - } - - public void testParseUniqueSettings() { - Settings settings = Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .build(); - - Tuple> tuple = AzureStorageSettings.loadLegacy(settings); - assertThat(tuple.v1(), notNullValue()); - assertThat(tuple.v1().getAccount(), is("myaccount1")); - assertThat(tuple.v1().getKey(), is("mykey1")); - assertThat(tuple.v2().keySet(), hasSize(0)); - assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1") - }); - } - - public void testParseTwoSettingsNoDefault() { - Settings settings = Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .put("cloud.azure.storage.azure2.account", "myaccount2") - .put("cloud.azure.storage.azure2.key", "mykey2") - .build(); - - try { - AzureStorageSettings.loadLegacy(settings); - fail("Should have failed with a SettingsException (no default data store)"); - } catch (SettingsException ex) { - assertEquals(ex.getMessage(), "No default Azure data store configured"); - } - assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure2"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure2"), - }); - } - - public void testParseTwoSettingsTooManyDefaultSet() { - Settings settings = Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .put("cloud.azure.storage.azure1.default", true) - .put("cloud.azure.storage.azure2.account", 
"myaccount2") - .put("cloud.azure.storage.azure2.key", "mykey2") - .put("cloud.azure.storage.azure2.default", true) - .build(); - - try { - AzureStorageSettings.loadLegacy(settings); - fail("Should have failed with a SettingsException (multiple default data stores)"); - } catch (SettingsException ex) { - assertEquals(ex.getMessage(), "Multiple default Azure data stores configured: [azure1] and [azure2]"); - } - assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_DEFAULT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure2"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure2"), - getConcreteSetting(DEPRECATED_DEFAULT_SETTING, "azure2") - }); - } - - public void testParseEmptySettings() { - Tuple> tuple = AzureStorageSettings.loadLegacy(Settings.EMPTY); - assertThat(tuple.v1(), nullValue()); - assertThat(tuple.v2().keySet(), hasSize(0)); - } - - public static Setting getConcreteSetting(Setting setting, String groupName) { - Setting.AffixKey k = (Setting.AffixKey) setting.getRawKey(); - String concreteKey = k.toConcreteKey(groupName).toString(); - return setting.getConcreteSetting(concreteKey); - } -} From 04b24c778043ecf47b28142fe7484e967d1a5dc3 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 12 Sep 2017 17:54:50 +0200 Subject: [PATCH 11/67] Fix Lucene version of 5.6.1. 
--- core/src/main/java/org/elasticsearch/Version.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 441b59def4cc2..b422345aa426c 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -93,7 +93,8 @@ public class Version implements Comparable { public static final int V_5_6_0_ID = 5060099; public static final Version V_5_6_0 = new Version(V_5_6_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); public static final int V_5_6_1_ID = 5060199; - public static final Version V_5_6_1 = new Version(V_5_6_1_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); + // use proper Lucene constant once we are on a Lucene snapshot that knows about 6.6.1 + public static final Version V_5_6_1 = new Version(V_5_6_1_ID, org.apache.lucene.util.Version.fromBits(6, 6, 1)); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); From 62a7205577b6e462c27681135b97ce4af051b58e Mon Sep 17 00:00:00 2001 From: Russ Cam Date: Wed, 13 Sep 2017 13:23:12 +1000 Subject: [PATCH 12/67] Add beta tag to MSI Windows Installer (#26616) --- docs/reference/setup/install/windows.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index 4abf0872779f3..b765466391ccc 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -1,6 +1,8 @@ [[windows]] === Install Elasticsearch with Windows MSI Installer +beta[] + Elasticsearch can be installed on Windows using the `.msi` package. This can install Elasticsearch as a Windows service or allow it to be run manually using the included `elasticsearch.exe` executable. 
From a34db4e09f728cd592c73d99c152b40789be257f Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 13 Sep 2017 11:51:55 +0200 Subject: [PATCH 13/67] Support for accessing Azure repositories through a proxy (#23518) You can define a proxy using the following settings: ```yml azure.client.default.proxy.host: proxy.host azure.client.default.proxy.port: 8888 azure.client.default.proxy.type: http ``` Supported values for `proxy.type` are `direct`, `http` or `socks`. Defaults to `direct` (no proxy). Closes #23506 BTW I changed a test `testGetSelectedClientBackoffPolicyNbRetries` as it was using an old setting name `cloud.azure.storage.azure.max_retries` instead of `azure.client.azure1.max_retries`. --- docs/plugins/repository-azure.asciidoc | 13 ++ .../azure/storage/AzureStorageService.java | 3 + .../storage/AzureStorageServiceImpl.java | 43 ++++--- .../azure/storage/AzureStorageSettings.java | 69 ++++++++-- .../azure/AzureRepositoryPlugin.java | 5 +- .../storage/AzureStorageServiceTests.java | 118 +++++++++++++++++- ...zureSnapshotRestoreListSnapshotsTests.java | 4 + .../azure/AzureSnapshotRestoreTests.java | 9 +- 8 files changed, 231 insertions(+), 33 deletions(-) diff --git a/docs/plugins/repository-azure.asciidoc b/docs/plugins/repository-azure.asciidoc index 3fc4a5a0cad21..ea413719f2446 100644 --- a/docs/plugins/repository-azure.asciidoc +++ b/docs/plugins/repository-azure.asciidoc @@ -67,6 +67,19 @@ The Azure Repository plugin works with all Standard storage accounts https://azure.microsoft.com/en-gb/documentation/articles/storage-premium-storage[Premium Locally Redundant Storage] (`Premium_LRS`) is **not supported** as it is only usable as VM disk storage, not as general storage. 
=============================================== +You can register a proxy per client using the following settings: + +[source,yaml] +---- +azure.client.default.proxy.host: proxy.host +azure.client.default.proxy.port: 8888 +azure.client.default.proxy.type: http +---- + +Supported values for `proxy.type` are `direct` (default), `http` or `socks`. +When `proxy.type` is set to `http` or `socks`, `proxy.host` and `proxy.port` must be provided. + + [[repository-azure-repository-settings]] ===== Repository settings diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java index 778fe44f15a3c..bebfc03c1b7a4 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java @@ -22,13 +22,16 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.net.Proxy; import java.net.URISyntaxException; +import java.util.Locale; import java.util.Map; /** diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java index cefbbf8c606ee..bb77390868507 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java +++ 
b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java @@ -21,6 +21,7 @@ import com.microsoft.azure.storage.CloudStorageAccount; import com.microsoft.azure.storage.LocationMode; +import com.microsoft.azure.storage.OperationContext; import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.RetryPolicy; import com.microsoft.azure.storage.StorageException; @@ -29,6 +30,7 @@ import com.microsoft.azure.storage.blob.CloudBlobClient; import com.microsoft.azure.storage.blob.CloudBlobContainer; import com.microsoft.azure.storage.blob.CloudBlockBlob; +import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; import com.microsoft.azure.storage.blob.ListBlobItem; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; @@ -131,12 +133,23 @@ CloudBlobClient getSelectedClient(String clientName, LocationMode mode) { return client; } + private OperationContext generateOperationContext(String clientName) { + OperationContext context = new OperationContext(); + AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName); + + if (azureStorageSettings.getProxy() != null) { + context.setProxy(azureStorageSettings.getProxy()); + } + + return context; + } + @Override public boolean doesContainerExist(String account, LocationMode mode, String container) { try { CloudBlobClient client = this.getSelectedClient(account, mode); CloudBlobContainer blobContainer = client.getContainerReference(container); - return SocketAccess.doPrivilegedException(blobContainer::exists); + return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, generateOperationContext(account))); } catch (Exception e) { logger.error("can not access container [{}]", container); } @@ -148,7 +161,7 @@ public void removeContainer(String account, LocationMode mode, String container) CloudBlobClient client = this.getSelectedClient(account, 
mode); CloudBlobContainer blobContainer = client.getContainerReference(container); logger.trace("removing container [{}]", container); - SocketAccess.doPrivilegedException(blobContainer::deleteIfExists); + SocketAccess.doPrivilegedException(() -> blobContainer.deleteIfExists(null, null, generateOperationContext(account))); } @Override @@ -157,7 +170,7 @@ public void createContainer(String account, LocationMode mode, String container) CloudBlobClient client = this.getSelectedClient(account, mode); CloudBlobContainer blobContainer = client.getContainerReference(container); logger.trace("creating container [{}]", container); - SocketAccess.doPrivilegedException(blobContainer::createIfNotExists); + SocketAccess.doPrivilegedException(() -> blobContainer.createIfNotExists(null, null, generateOperationContext(account))); } catch (IllegalArgumentException e) { logger.trace((Supplier) () -> new ParameterizedMessage("fails creating container [{}]", container), e); throw new RepositoryException(container, e.getMessage(), e); @@ -174,7 +187,8 @@ public void deleteFiles(String account, LocationMode mode, String container, Str SocketAccess.doPrivilegedVoidException(() -> { if (blobContainer.exists()) { // We list the blobs using a flat blob listing mode - for (ListBlobItem blobItem : blobContainer.listBlobs(path, true)) { + for (ListBlobItem blobItem : blobContainer.listBlobs(path, true, EnumSet.noneOf(BlobListingDetails.class), null, + generateOperationContext(account))) { String blobName = blobNameFromUri(blobItem.getUri()); logger.trace("removing blob [{}] full URI was [{}]", blobName, blobItem.getUri()); deleteBlob(account, mode, container, blobName); @@ -208,9 +222,9 @@ public boolean blobExists(String account, LocationMode mode, String container, S // Container name must be lower case. 
CloudBlobClient client = this.getSelectedClient(account, mode); CloudBlobContainer blobContainer = client.getContainerReference(container); - if (SocketAccess.doPrivilegedException(blobContainer::exists)) { + if (SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, generateOperationContext(account)))) { CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); - return SocketAccess.doPrivilegedException(azureBlob::exists); + return SocketAccess.doPrivilegedException(() -> azureBlob.exists(null, null, generateOperationContext(account))); } return false; @@ -223,10 +237,11 @@ public void deleteBlob(String account, LocationMode mode, String container, Stri // Container name must be lower case. CloudBlobClient client = this.getSelectedClient(account, mode); CloudBlobContainer blobContainer = client.getContainerReference(container); - if (SocketAccess.doPrivilegedException(blobContainer::exists)) { + if (SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, generateOperationContext(account)))) { logger.trace("container [{}]: blob [{}] found. 
removing.", container, blob); CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); - SocketAccess.doPrivilegedVoidException(azureBlob::delete); + SocketAccess.doPrivilegedVoidException(() -> azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, + generateOperationContext(account))); } } @@ -235,7 +250,7 @@ public InputStream getInputStream(String account, LocationMode mode, String cont logger.trace("reading container [{}], blob [{}]", container, blob); CloudBlobClient client = this.getSelectedClient(account, mode); CloudBlockBlob blockBlobReference = client.getContainerReference(container).getBlockBlobReference(blob); - return SocketAccess.doPrivilegedException(blockBlobReference::openInputStream); + return SocketAccess.doPrivilegedException(() -> blockBlobReference.openInputStream(null, null, generateOperationContext(account))); } @Override @@ -243,7 +258,7 @@ public OutputStream getOutputStream(String account, LocationMode mode, String co logger.trace("writing container [{}], blob [{}]", container, blob); CloudBlobClient client = this.getSelectedClient(account, mode); CloudBlockBlob blockBlobReference = client.getContainerReference(container).getBlockBlobReference(blob); - return SocketAccess.doPrivilegedException(blockBlobReference::openOutputStream); + return SocketAccess.doPrivilegedException(() -> blockBlobReference.openOutputStream(null, null, generateOperationContext(account))); } @Override @@ -260,7 +275,7 @@ public Map listBlobsByPrefix(String account, LocationMode SocketAccess.doPrivilegedVoidException(() -> { if (blobContainer.exists()) { for (ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? 
"" : prefix), false, - enumBlobListingDetails, null, null)) { + enumBlobListingDetails, null, generateOperationContext(account))) { URI uri = blobItem.getUri(); logger.trace("blob url [{}]", uri); @@ -284,11 +299,11 @@ public void moveBlob(String account, LocationMode mode, String container, String CloudBlobClient client = this.getSelectedClient(account, mode); CloudBlobContainer blobContainer = client.getContainerReference(container); CloudBlockBlob blobSource = blobContainer.getBlockBlobReference(sourceBlob); - if (SocketAccess.doPrivilegedException(blobSource::exists)) { + if (SocketAccess.doPrivilegedException(() -> blobSource.exists(null, null, generateOperationContext(account)))) { CloudBlockBlob blobTarget = blobContainer.getBlockBlobReference(targetBlob); SocketAccess.doPrivilegedVoidException(() -> { - blobTarget.startCopy(blobSource); - blobSource.delete(); + blobTarget.startCopy(blobSource, null, null, null, generateOperationContext(account)); + blobSource.delete(DeleteSnapshotsOption.NONE, null, null, generateOperationContext(account)); }); logger.debug("moveBlob container [{}], sourceBlob [{}], targetBlob [{}] -> done", container, sourceBlob, targetBlob); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java index b33822eee61f0..19473b4810ab7 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java @@ -20,16 +20,23 @@ package org.elasticsearch.cloud.azure.storage; import com.microsoft.azure.storage.RetryPolicy; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import 
org.elasticsearch.common.settings.Setting.AffixSetting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.TimeValue; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Proxy; +import java.net.UnknownHostException; import java.util.Collections; import java.util.HashMap; +import java.util.Locale; import java.util.Map; import java.util.Set; @@ -37,38 +44,66 @@ public final class AzureStorageSettings { // prefix for azure client settings private static final String PREFIX = "azure.client."; - /** - * Azure account name - */ - public static final AffixSetting ACCOUNT_SETTING = Setting.affixKeySetting(PREFIX, "account", - key -> SecureSetting.secureString(key, null)); + /** Azure account name */ + public static final AffixSetting ACCOUNT_SETTING = + Setting.affixKeySetting(PREFIX, "account", key -> SecureSetting.secureString(key, null)); - /** - * max_retries: Number of retries in case of Azure errors. Defaults to 3 (RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT). - */ + /** max_retries: Number of retries in case of Azure errors. Defaults to 3 (RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT). */ private static final Setting MAX_RETRIES_SETTING = Setting.affixKeySetting(PREFIX, "max_retries", (key) -> Setting.intSetting(key, RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT, Setting.Property.NodeScope)); - /** - * Azure key - */ + /** Azure key */ public static final AffixSetting KEY_SETTING = Setting.affixKeySetting(PREFIX, "key", key -> SecureSetting.secureString(key, null)); public static final AffixSetting TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "timeout", (key) -> Setting.timeSetting(key, TimeValue.timeValueMinutes(-1), Property.NodeScope)); + /** The type of the proxy to connect to azure through. 
Can be direct (no proxy, default), http or socks */ + public static final AffixSetting PROXY_TYPE_SETTING = Setting.affixKeySetting(PREFIX, "proxy.type", + (key) -> new Setting<>(key, "direct", s -> Proxy.Type.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope)); + + /** The host name of a proxy to connect to azure through. */ + public static final Setting PROXY_HOST_SETTING = Setting.affixKeySetting(PREFIX, "proxy.host", + (key) -> Setting.simpleString(key, Property.NodeScope)); + + /** The port of a proxy to connect to azure through. */ + public static final Setting PROXY_PORT_SETTING = Setting.affixKeySetting(PREFIX, "proxy.port", + (key) -> Setting.intSetting(key, 0, 0, 65535, Setting.Property.NodeScope)); + private final String account; private final String key; private final TimeValue timeout; private final int maxRetries; + private final Proxy proxy; + - public AzureStorageSettings(String account, String key, TimeValue timeout, int maxRetries) { + public AzureStorageSettings(String account, String key, TimeValue timeout, int maxRetries, Proxy.Type proxyType, String proxyHost, + Integer proxyPort) { this.account = account; this.key = key; this.timeout = timeout; this.maxRetries = maxRetries; + + // Register the proxy if we have any + // Validate proxy settings + if (proxyType.equals(Proxy.Type.DIRECT) && (proxyPort != 0 || Strings.hasText(proxyHost))) { + throw new SettingsException("Azure Proxy port or host have been set but proxy type is not defined."); + } + if (proxyType.equals(Proxy.Type.DIRECT) == false && (proxyPort == 0 || Strings.isEmpty(proxyHost))) { + throw new SettingsException("Azure Proxy type has been set but proxy host or port is not defined."); + } + + if (proxyType.equals(Proxy.Type.DIRECT)) { + proxy = null; + } else { + try { + proxy = new Proxy(proxyType, new InetSocketAddress(InetAddress.getByName(proxyHost), proxyPort)); + } catch (UnknownHostException e) { + throw new SettingsException("Azure proxy host is unknown.", e); + } + } 
} public String getKey() { @@ -87,6 +122,10 @@ public int getMaxRetries() { return maxRetries; } + public Proxy getProxy() { + return proxy; + } + @Override public String toString() { final StringBuilder sb = new StringBuilder("AzureStorageSettings{"); @@ -94,6 +133,7 @@ public String toString() { sb.append(", key='").append(key).append('\''); sb.append(", timeout=").append(timeout); sb.append(", maxRetries=").append(maxRetries); + sb.append(", proxy=").append(proxy); sb.append('}'); return sb.toString(); } @@ -127,7 +167,10 @@ static AzureStorageSettings getClientSettings(Settings settings, String clientNa SecureString key = getConfigValue(settings, clientName, KEY_SETTING)) { return new AzureStorageSettings(account.toString(), key.toString(), getValue(settings, clientName, TIMEOUT_SETTING), - getValue(settings, clientName, MAX_RETRIES_SETTING)); + getValue(settings, clientName, MAX_RETRIES_SETTING), + getValue(settings, clientName, PROXY_TYPE_SETTING), + getValue(settings, clientName, PROXY_HOST_SETTING), + getValue(settings, clientName, PROXY_PORT_SETTING)); } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java index 2816aa963fa59..ed2f6be776dca 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java @@ -64,7 +64,10 @@ public List> getSettings() { return Arrays.asList( AzureStorageSettings.ACCOUNT_SETTING, AzureStorageSettings.KEY_SETTING, - AzureStorageSettings.TIMEOUT_SETTING + AzureStorageSettings.TIMEOUT_SETTING, + AzureStorageSettings.PROXY_TYPE_SETTING, + AzureStorageSettings.PROXY_HOST_SETTING, + AzureStorageSettings.PROXY_PORT_SETTING ); } } diff --git 
a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java index 68c2186d9855e..6c05b7cdec3ad 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java @@ -20,14 +20,20 @@ package org.elasticsearch.cloud.azure.storage; import com.microsoft.azure.storage.LocationMode; +import com.microsoft.azure.storage.OperationContext; import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.blob.CloudBlobClient; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.test.ESTestCase; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Proxy; import java.net.URI; import java.net.URISyntaxException; +import java.net.UnknownHostException; import java.util.Map; import static org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl.blobNameFromUri; @@ -115,7 +121,7 @@ public void testGetSelectedClientBackoffPolicy() { public void testGetSelectedClientBackoffPolicyNbRetries() { Settings timeoutSettings = Settings.builder() .setSecureSettings(buildSecureSettings()) - .put("cloud.azure.storage.azure.max_retries", 7) + .put("azure.client.azure1.max_retries", 7) .build(); AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(timeoutSettings); @@ -124,6 +130,116 @@ public void testGetSelectedClientBackoffPolicyNbRetries() { assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); } + public void testNoProxy() { + Settings settings = Settings.builder() + 
.setSecureSettings(buildSecureSettings()) + .build(); + AzureStorageServiceMock mock = new AzureStorageServiceMock(settings); + assertThat(mock.storageSettings.get("azure1").getProxy(), nullValue()); + assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); + assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); + } + + public void testProxyHttp() throws UnknownHostException { + Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.proxy.host", "127.0.0.1") + .put("azure.client.azure1.proxy.port", 8080) + .put("azure.client.azure1.proxy.type", "http") + .build(); + AzureStorageServiceMock mock = new AzureStorageServiceMock(settings); + Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); + + assertThat(azure1Proxy, notNullValue()); + assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); + assertThat(azure1Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080))); + assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); + assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); + } + + public void testMultipleProxies() throws UnknownHostException { + Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.proxy.host", "127.0.0.1") + .put("azure.client.azure1.proxy.port", 8080) + .put("azure.client.azure1.proxy.type", "http") + .put("azure.client.azure2.proxy.host", "127.0.0.1") + .put("azure.client.azure2.proxy.port", 8081) + .put("azure.client.azure2.proxy.type", "http") + .build(); + AzureStorageServiceMock mock = new AzureStorageServiceMock(settings); + Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); + assertThat(azure1Proxy, notNullValue()); + assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); + assertThat(azure1Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080))); + Proxy azure2Proxy = 
mock.storageSettings.get("azure2").getProxy(); + assertThat(azure2Proxy, notNullValue()); + assertThat(azure2Proxy.type(), is(Proxy.Type.HTTP)); + assertThat(azure2Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8081))); + assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); + } + + public void testProxySocks() throws UnknownHostException { + Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.proxy.host", "127.0.0.1") + .put("azure.client.azure1.proxy.port", 8080) + .put("azure.client.azure1.proxy.type", "socks") + .build(); + AzureStorageServiceMock mock = new AzureStorageServiceMock(settings); + Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); + assertThat(azure1Proxy, notNullValue()); + assertThat(azure1Proxy.type(), is(Proxy.Type.SOCKS)); + assertThat(azure1Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080))); + assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); + assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); + } + + public void testProxyNoHost() { + Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.proxy.port", 8080) + .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) + .build(); + + SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMock(settings)); + assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); + } + + public void testProxyNoPort() { + Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.proxy.host", "127.0.0.1") + .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) + .build(); + + SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMock(settings)); + assertEquals("Azure 
Proxy type has been set but proxy host or port is not defined.", e.getMessage()); + } + + public void testProxyNoType() { + Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.proxy.host", "127.0.0.1") + .put("azure.client.azure1.proxy.port", 8080) + .build(); + + SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMock(settings)); + assertEquals("Azure Proxy port or host have been set but proxy type is not defined.", e.getMessage()); + } + + public void testProxyWrongHost() { + Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) + .put("azure.client.azure1.proxy.host", "thisisnotavalidhostorwehavebeensuperunlucky") + .put("azure.client.azure1.proxy.port", 8080) + .build(); + + SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMock(settings)); + assertEquals("Azure proxy host is unknown.", e.getMessage()); + } + /** * This internal class just overload createClient method which is called by AzureStorageServiceImpl.doStart() */ diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreListSnapshotsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreListSnapshotsTests.java index 6760b418ed50e..989b9541d79cc 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreListSnapshotsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreListSnapshotsTests.java @@ -35,6 +35,7 @@ import org.junit.Before; import java.net.URISyntaxException; +import java.net.UnknownHostException; import java.util.concurrent.TimeUnit; import static org.elasticsearch.cloud.azure.AzureTestUtils.readSettingsFromFile; @@ -63,6 +64,9 @@ public class 
AzureSnapshotRestoreListSnapshotsTests extends AbstractAzureWithThi AzureStorageSettings.load(readSettingsFromFile())); private final String containerName = getContainerName(); + public AzureSnapshotRestoreListSnapshotsTests() { + } + public void testList() throws Exception { Client client = client(); logger.info("--> creating azure primary repository"); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java index 7eb808e7c956e..4ad3608d44354 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java @@ -54,6 +54,7 @@ import java.net.URISyntaxException; import java.util.Arrays; import java.util.Collection; +import java.net.UnknownHostException; import java.util.Locale; import java.util.concurrent.TimeUnit; @@ -103,7 +104,7 @@ public Settings indexSettings() { } @Before @After - public final void wipeAzureRepositories() throws StorageException, URISyntaxException { + public final void wipeAzureRepositories() throws StorageException, URISyntaxException, UnknownHostException { wipeRepositories(); cleanRepositoryFiles( getContainerName(), @@ -455,7 +456,7 @@ private void checkContainerName(final String container, final boolean correct) t try { logger.info("--> remove container [{}]", container); cleanRepositoryFiles(container); - } catch (StorageException | URISyntaxException e) { + } catch (StorageException | URISyntaxException | UnknownHostException ignored) { // We can ignore that as we just try to clean after the test } assertTrue(putRepositoryResponse.isAcknowledged() == correct); @@ -498,7 +499,7 @@ public void testNonExistingRepo_23() { public void testRemoveAndCreateContainer() throws Exception { final String container 
= getContainerName().concat("-testremove"); final AzureStorageService storageService = new AzureStorageServiceImpl(nodeSettings(0),AzureStorageSettings.load(nodeSettings(0))); - + // It could happen that we run this test really close to a previous one // so we might need some time to be able to create the container assertBusy(() -> { @@ -549,7 +550,7 @@ public static void wipeRepositories(String... repositories) { /** * Purge the test containers */ - public void cleanRepositoryFiles(String... containers) throws StorageException, URISyntaxException { + public void cleanRepositoryFiles(String... containers) throws StorageException, URISyntaxException, UnknownHostException { Settings settings = readSettingsFromFile(); AzureStorageService client = new AzureStorageServiceImpl(settings, AzureStorageSettings.load(settings)); for (String container : containers) { From e00db235bc1aa0765f5b21a9dc40a4e94197b68c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 13 Sep 2017 11:57:06 +0200 Subject: [PATCH 14/67] Add a soft limit for the number of requested doc-value fields (#26574) Requesting to many docvalue_fields in a search request can potentially be costly because it might incur a per-field per-document seek. This change introduces a soft limit on the number of fields that can be retrieved. The setting can be changed per index using the `index.max_docvalue_fields_search` setting. 
Relates to #26390 --- .../common/settings/IndexScopedSettings.java | 1 + .../elasticsearch/index/IndexSettings.java | 21 +++++++++++++ .../elasticsearch/search/SearchService.java | 8 ++++- .../index/IndexSettingsTests.java | 16 ++++++++++ .../search/SearchServiceTests.java | 30 ++++++++++++++++++- docs/reference/index-modules.asciidoc | 6 ++++ .../rest-api-spec/test/search/30_limits.yml | 22 ++++++++++++++ 7 files changed, 102 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 5a6c17bf2f0c1..92d9cd96a71c7 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -111,6 +111,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_REFRESH_INTERVAL_SETTING, IndexSettings.MAX_RESULT_WINDOW_SETTING, IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING, + IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING, IndexSettings.MAX_RESCORE_WINDOW_SETTING, IndexSettings.MAX_ADJACENCY_MATRIX_FILTERS_SETTING, IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING, diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index 0bb9db0130472..7899136e68dae 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -98,6 +98,13 @@ public final class IndexSettings { */ public static final Setting MAX_INNER_RESULT_WINDOW_SETTING = Setting.intSetting("index.max_inner_result_window", 100, 1, Property.Dynamic, Property.IndexScope); + /** + * Index setting describing the maximum value of allowed `docvalue_fields`that can be retrieved + * per search request. 
The default maximum of 100 is defensive for the reason that retrieving + * doc values might incur a per-field per-document seek. + */ + public static final Setting MAX_DOCVALUE_FIELDS_SEARCH_SETTING = + Setting.intSetting("index.max_docvalue_fields_search", 100, 0, Property.Dynamic, Property.IndexScope); /** * Index setting describing the maximum size of the rescore window. Defaults to {@link #MAX_RESULT_WINDOW_SETTING} * because they both do the same thing: control the size of the heap of hits. @@ -221,6 +228,7 @@ public final class IndexSettings { private volatile int maxInnerResultWindow; private volatile int maxAdjacencyMatrixFilters; private volatile int maxRescoreWindow; + private volatile int maxDocvalueFields; private volatile boolean TTLPurgeDisabled; /** * The maximum number of refresh listeners allows on this shard. @@ -322,6 +330,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti maxInnerResultWindow = scopedSettings.get(MAX_INNER_RESULT_WINDOW_SETTING); maxAdjacencyMatrixFilters = scopedSettings.get(MAX_ADJACENCY_MATRIX_FILTERS_SETTING); maxRescoreWindow = scopedSettings.get(MAX_RESCORE_WINDOW_SETTING); + maxDocvalueFields = scopedSettings.get(MAX_DOCVALUE_FIELDS_SEARCH_SETTING); TTLPurgeDisabled = scopedSettings.get(INDEX_TTL_DISABLE_PURGE_SETTING); maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD); maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL); @@ -351,6 +360,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(MAX_INNER_RESULT_WINDOW_SETTING, this::setMaxInnerResultWindow); scopedSettings.addSettingsUpdateConsumer(MAX_ADJACENCY_MATRIX_FILTERS_SETTING, this::setMaxAdjacencyMatrixFilters); scopedSettings.addSettingsUpdateConsumer(MAX_RESCORE_WINDOW_SETTING, this::setMaxRescoreWindow); + scopedSettings.addSettingsUpdateConsumer(MAX_DOCVALUE_FIELDS_SEARCH_SETTING, this::setMaxDocvalueFields); 
scopedSettings.addSettingsUpdateConsumer(INDEX_WARMER_ENABLED_SETTING, this::setEnableWarmer); scopedSettings.addSettingsUpdateConsumer(INDEX_GC_DELETES_SETTING, this::setGCDeletes); scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, this::setTranslogFlushThresholdSize); @@ -607,6 +617,17 @@ private void setMaxRescoreWindow(int maxRescoreWindow) { this.maxRescoreWindow = maxRescoreWindow; } + /** + * Returns the maximum number of allowed docvalue_fields to retrieve in a search request + */ + public int getMaxDocvalueFields() { + return this.maxDocvalueFields; + } + + private void setMaxDocvalueFields(int maxDocvalueFields) { + this.maxDocvalueFields = maxDocvalueFields; + } + /** * Returns the GC deletes cycle in milliseconds. */ diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 4e7c070bfeef8..0b112a59c628c 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -43,7 +43,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.MergeSchedulerConfig; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.InnerHitContextBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -771,6 +770,13 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc context.fetchSourceContext(source.fetchSource()); } if (source.docValueFields() != null) { + int maxAllowedDocvalueFields = context.mapperService().getIndexSettings().getMaxDocvalueFields(); + if (source.docValueFields().size() > maxAllowedDocvalueFields) { + throw new IllegalArgumentException( + "Trying to retrieve too many docvalue_fields. 
Must be less than or equal to: [" + maxAllowedDocvalueFields + + "] but was [" + source.docValueFields().size() + "]. This limit can be set by changing the [" + + IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey() + "] index level setting."); + } context.docValueFieldsContext(new DocValueFieldsContext(source.docValueFields())); } if (source.highlighter() != null) { diff --git a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index dad2b4e7d9153..bb147b8752efe 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -310,6 +310,22 @@ public void testMaxInnerResultWindow() { assertEquals(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.get(Settings.EMPTY).intValue(), settings.getMaxInnerResultWindow()); } + public void testMaxDocvalueFields() { + IndexMetaData metaData = newIndexMeta("index", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey(), 200).build()); + IndexSettings settings = new IndexSettings(metaData, Settings.EMPTY); + assertEquals(200, settings.getMaxDocvalueFields()); + settings.updateIndexMetaData( + newIndexMeta("index", Settings.builder().put(IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey(), 50).build())); + assertEquals(50, settings.getMaxDocvalueFields()); + settings.updateIndexMetaData(newIndexMeta("index", Settings.EMPTY)); + assertEquals(IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.get(Settings.EMPTY).intValue(), settings.getMaxDocvalueFields()); + + metaData = newIndexMeta("index", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()); + settings = new IndexSettings(metaData, Settings.EMPTY); + assertEquals(IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.get(Settings.EMPTY).intValue(), settings.getMaxDocvalueFields()); 
+ } + public void testMaxAdjacencyMatrixFiltersSetting() { IndexMetaData metaData = newIndexMeta("index", Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) diff --git a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 57ae81156ea59..10af4b333ea8b 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; @@ -262,6 +261,35 @@ public void testTimeout() throws IOException { } + /** + * test that getting more than the allowed number of docvalue_fields throws an exception + */ + public void testMaxDocvalueFieldsSearch() throws IOException { + createIndex("index"); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + // adding the maximum allowed number of docvalue_fields to retrieve + for (int i = 0; i < indexService.getIndexSettings().getMaxDocvalueFields(); i++) { + searchSourceBuilder.docValueField("field" + i); + } + try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f), 
null)) { + assertNotNull(context); + searchSourceBuilder.docValueField("one_field_too_much"); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f), null)); + assertEquals( + "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [100] but was [101]. " + + "This limit can be set by changing the [index.max_docvalue_fields_search] index level setting.", + ex.getMessage()); + } + } + public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin { @Override public List> getQueries() { diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 5347fd875d798..889f5a6b02e12 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -133,6 +133,12 @@ specific index module: requests take heap memory and time proportional to `max(window_size, from + size)` and this limits that memory. +`index.max_docvalue_fields_search`:: + + The maximum number of `docvalue_fields` that are allowed in a query. + Defaults to `100`. Doc-value fields are costly since they might incur + a per-field per-document seek. 
+ `index.blocks.read_only`:: Set to `true` to make the index and index metadata read only, `false` to diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml index 0fa6692ae521f..4c86d27175c6c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml @@ -50,3 +50,25 @@ setup: match_all: {} query_weight: 1 rescore_query_weight: 2 + +--- +"Docvalues_fields size limit": + - skip: + version: " - 6.99.99" + reason: soft limit for docvalue_fields only available as of 7.0.0 + + - do: + indices.create: + index: test_2 + body: + settings: + index.max_docvalue_fields_search: 2 + + - do: + catch: /Trying to retrieve too many docvalue_fields\. Must be less than or equal to[:] \[2\] but was \[3\]\. This limit can be set by changing the \[index.max_docvalue_fields_search\] index level setting\./ + search: + index: test_2 + body: + query: + match_all: {} + docvalue_fields: ["one", "two", "three"] From b3e7e85cf110b6d48cab4f82fa1fc689975e81de Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 13 Sep 2017 06:16:27 -0400 Subject: [PATCH 15/67] Let search phases override max concurrent requests If the query coordinating node is also a data node that holds all the shards for a search request, we can end up recursing through the can match phase (because we send a local request and on response in the listener move to the next shard and do this again, without ever having returned from previous shards). This recursion can lead to stack overflow for even a reasonable number of indices (daily indices over a sixty days with five shards per day is enough to trigger the stack overflow). Moreover, all this execution would be happening on a network thread (the thread that initially received the query). With this commit, we allow search phases to override max concurrent requests. 
This allows the can match phase to avoid recursing through the shards towards a stack overflow. Relates #26484 --- .../search/AbstractSearchAsyncAction.java | 4 +- .../search/CanMatchPreFilterSearchPhase.java | 11 ++-- .../action/search/InitialSearchPhase.java | 5 +- .../SearchDfsQueryThenFetchAsyncAction.java | 3 +- .../SearchQueryThenFetchAsyncAction.java | 3 +- .../action/search/SearchTransportService.java | 5 +- .../AbstractSearchAsyncActionTests.java | 5 +- .../CanMatchPreFilterSearchPhaseTests.java | 57 +++++++++++++++++++ .../action/search/SearchAsyncActionTests.java | 9 ++- 9 files changed, 82 insertions(+), 20 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 89be2ecabeb24..c7f1fa5dc5c5e 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -76,8 +76,8 @@ protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportS Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion, - SearchTask task, SearchPhaseResults resultConsumer) { - super(name, request, shardsIts, logger); + SearchTask task, SearchPhaseResults resultConsumer, int maxConcurrentShardRequests) { + super(name, request, shardsIts, logger, maxConcurrentShardRequests); this.timeProvider = timeProvider; this.logger = logger; this.searchTransportService = searchTransportService; diff --git a/core/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/core/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index ea5cf831859de..49575125f68d6 100644 --- 
a/core/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/core/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -26,10 +26,6 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.transport.Transport; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.concurrent.Executor; import java.util.function.BiFunction; @@ -55,9 +51,12 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction listener, GroupShardsIterator shardsIts, TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion, SearchTask task, Function, SearchPhase> phaseFactory) { + /* + * We set max concurrent shard requests to the number of shards to otherwise avoid deep recursing that would occur if the local node + * is the coordinating node for the query, holds all the shards for the request, and there are a lot of shards. 
+ */ super("can_match", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, - listener, - shardsIts, timeProvider, clusterStateVersion, task, new BitSetSearchPhaseResults(shardsIts.size())); + listener, shardsIts, timeProvider, clusterStateVersion, task, new BitSetSearchPhaseResults(shardsIts.size()), shardsIts.size()); this.phaseFactory = phaseFactory; this.shardsIts = shardsIts; } diff --git a/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java b/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java index fcee980379bf1..a68d1d599c593 100644 --- a/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java +++ b/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java @@ -52,7 +52,8 @@ abstract class InitialSearchPhase extends private final AtomicInteger shardExecutionIndex = new AtomicInteger(0); private final int maxConcurrentShardRequests; - InitialSearchPhase(String name, SearchRequest request, GroupShardsIterator shardsIts, Logger logger) { + InitialSearchPhase(String name, SearchRequest request, GroupShardsIterator shardsIts, Logger logger, + int maxConcurrentShardRequests) { super(name); this.request = request; this.shardsIts = shardsIts; @@ -62,7 +63,7 @@ abstract class InitialSearchPhase extends // on a per shards level we use shardIt.remaining() to increment the totalOps pointer but add 1 for the current shard result // we process hence we add one for the non active partition here. 
this.expectedTotalOps = shardsIts.totalSizeWith1ForEmpty(); - maxConcurrentShardRequests = Math.min(request.getMaxConcurrentShardRequests(), shardsIts.size()); + this.maxConcurrentShardRequests = Math.min(maxConcurrentShardRequests, shardsIts.size()); } private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId, diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index a901d71157137..ec055dfec8df6 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -42,7 +42,8 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction final GroupShardsIterator shardsIts, final TransportSearchAction.SearchTimeProvider timeProvider, final long clusterStateVersion, final SearchTask task) { super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, listener, - shardsIts, timeProvider, clusterStateVersion, task, new ArraySearchPhaseResults<>(shardsIts.size())); + shardsIts, timeProvider, clusterStateVersion, task, new ArraySearchPhaseResults<>(shardsIts.size()), + request.getMaxConcurrentShardRequests()); this.searchPhaseController = searchPhaseController; } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index de8109aadd8fe..5ddd1df231d17 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -42,7 +42,8 @@ final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction 
shardsIts, final TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion, SearchTask task) { super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, listener, - shardsIts, timeProvider, clusterStateVersion, task, searchPhaseController.newSearchPhaseResults(request, shardsIts.size())); + shardsIts, timeProvider, clusterStateVersion, task, searchPhaseController.newSearchPhaseResults(request, shardsIts.size()), + request.getMaxConcurrentShardRequests()); this.searchPhaseController = searchPhaseController; } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index dba382aed6cf7..d4fd7b609ee4d 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -47,9 +47,9 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TaskAwareTransportRequestHandler; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportActionProxy; -import org.elasticsearch.transport.TaskAwareTransportRequestHandler; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; @@ -59,7 +59,6 @@ import java.io.IOException; import java.io.UncheckedIOException; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.function.BiFunction; @@ -447,7 +446,7 @@ public void messageReceived(ShardFetchSearchRequest request, TransportChannel ch }); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, FetchSearchResult::new); - // this is super cheap and should not 
hit thread-pool rejections + // this is cheap, it does not fetch during the rewrite phase, so we can let it quickly execute on a networking thread transportService.registerRequestHandler(QUERY_CAN_MATCH_NAME, ThreadPool.Names.SAME, ShardSearchTransportRequest::new, new TaskAwareTransportRequestHandler() { @Override diff --git a/core/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java b/core/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index ec78f1892f90f..8f413eb436421 100644 --- a/core/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -60,11 +60,12 @@ private AbstractSearchAsyncAction createAction( System::nanoTime); } + final SearchRequest request = new SearchRequest(); return new AbstractSearchAsyncAction("test", null, null, null, Collections.singletonMap("foo", new AliasFilter(new MatchAllQueryBuilder())), Collections.singletonMap("foo", 2.0f), null, - new SearchRequest(), null, new GroupShardsIterator<>(Collections.singletonList( + request, null, new GroupShardsIterator<>(Collections.singletonList( new SearchShardIterator(null, null, Collections.emptyList(), null))), timeProvider, 0, null, - new InitialSearchPhase.ArraySearchPhaseResults<>(10)) { + new InitialSearchPhase.ArraySearchPhaseResults<>(10), request.getMaxConcurrentShardRequests()) { @Override protected SearchPhase getNextPhase(final SearchPhaseResults results, final SearchPhaseContext context) { return null; diff --git a/core/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/core/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 87cebc957c6c0..373173a1fc6a8 100644 --- a/core/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ 
b/core/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -170,4 +170,61 @@ public void run() throws IOException { assertEquals(shard1, !result.get().get(0).skip()); assertFalse(result.get().get(1).skip()); // never skip the failure } + + /* + * In cases that a query coordinating node held all the shards for a query, the can match phase would recurse and end in stack overflow + * when subjected to max concurrent search requests. This test is a test for that situation. + */ + public void testLotsOfShards() throws InterruptedException { + final TransportSearchAction.SearchTimeProvider timeProvider = + new TransportSearchAction.SearchTimeProvider(0, System.nanoTime(), System::nanoTime); + + final Map lookup = new ConcurrentHashMap<>(); + final DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + final DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + lookup.put("node1", new SearchAsyncActionTests.MockConnection(primaryNode)); + lookup.put("node2", new SearchAsyncActionTests.MockConnection(replicaNode)); + + final SearchTransportService searchTransportService = + new SearchTransportService(Settings.builder().put("search.remote.connect", false).build(), null, null) { + @Override + public void sendCanMatch( + Transport.Connection connection, + ShardSearchTransportRequest request, + SearchTask task, + ActionListener listener) { + listener.onResponse(new CanMatchResponse(randomBoolean())); + } + }; + + final AtomicReference> result = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + final OriginalIndices originalIndices = new OriginalIndices(new String[]{"idx"}, IndicesOptions.strictExpandOpenAndForbidClosed()); + final GroupShardsIterator shardsIter = + SearchAsyncActionTests.getShardsIter("idx", originalIndices, 2048, randomBoolean(), primaryNode, replicaNode); + final 
CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( + logger, + searchTransportService, + (clusterAlias, node) -> lookup.get(node), + Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)), + Collections.emptyMap(), + EsExecutors.newDirectExecutorService(), + new SearchRequest(), + null, + shardsIter, + timeProvider, + 0, + null, + (iter) -> new SearchPhase("test") { + @Override + public void run() throws IOException { + result.set(iter); + latch.countDown(); + }}); + + canMatchPhase.start(); + latch.await(); + + } + } diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index 3ee681383cd27..b9602f26346dc 100644 --- a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -110,7 +110,8 @@ public void onFailure(Exception e) { new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), 0, null, - new InitialSearchPhase.ArraySearchPhaseResults<>(shardsIter.size())) { + new InitialSearchPhase.ArraySearchPhaseResults<>(shardsIter.size()), + request.getMaxConcurrentShardRequests()) { @Override protected void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting shard, @@ -199,7 +200,8 @@ public void onFailure(Exception e) { new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), 0, null, - new InitialSearchPhase.ArraySearchPhaseResults<>(shardsIter.size())) { + new InitialSearchPhase.ArraySearchPhaseResults<>(shardsIter.size()), + request.getMaxConcurrentShardRequests()) { @Override protected void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting shard, @@ -300,7 +302,8 @@ public void sendFreeContext(Transport.Connection connection, long contextId, Ori new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), 0, null, - new 
InitialSearchPhase.ArraySearchPhaseResults<>(shardsIter.size())) { + new InitialSearchPhase.ArraySearchPhaseResults<>(shardsIter.size()), + request.getMaxConcurrentShardRequests()) { TestSearchResponse response = new TestSearchResponse(); @Override From d2cfad6187330f98185f277efcefef06781eea48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 13 Sep 2017 13:15:31 +0200 Subject: [PATCH 16/67] [Tests] Remove skip tests in search/30_limits.yml After backporting the related change to the 6.x branches, this test can now also be run in a mixed cluster. Relates to #26574 --- .../rest-api-spec/test/search/30_limits.yml | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml index 4c86d27175c6c..c8f0009658d22 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml @@ -1,4 +1,11 @@ setup: + - do: + indices.create: + index: test_1 + body: + settings: + index.max_docvalue_fields_search: 2 + - do: index: index: test_1 @@ -53,21 +60,11 @@ setup: --- "Docvalues_fields size limit": - - skip: - version: " - 6.99.99" - reason: soft limit for docvalue_fields only available as of 7.0.0 - - - do: - indices.create: - index: test_2 - body: - settings: - index.max_docvalue_fields_search: 2 - do: catch: /Trying to retrieve too many docvalue_fields\. Must be less than or equal to[:] \[2\] but was \[3\]\. 
This limit can be set by changing the \[index.max_docvalue_fields_search\] index level setting\./ search: - index: test_2 + index: test_1 body: query: match_all: {} From 9e05b3260bf2ad234d903ae6be3b58b6cd451492 Mon Sep 17 00:00:00 2001 From: Ivan Brusic Date: Wed, 13 Sep 2017 04:58:30 -0700 Subject: [PATCH 17/67] Add boolean similarity to built in similarity types (#26613) --- .../index/similarity/SimilarityService.java | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index 5c4a425828f22..e1080f2c2ccae 100644 --- a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -46,30 +46,27 @@ public final class SimilarityService extends AbstractIndexComponent { public static final Map BUILT_IN; static { Map defaults = new HashMap<>(); - Map buildIn = new HashMap<>(); defaults.put("classic", (name, settings, indexSettings, scriptService) -> new ClassicSimilarityProvider(name, settings, indexSettings)); defaults.put("BM25", (name, settings, indexSettings, scriptService) -> new BM25SimilarityProvider(name, settings, indexSettings)); defaults.put("boolean", (name, settings, indexSettings, scriptService) -> new BooleanSimilarityProvider(name, settings, indexSettings)); - buildIn.put("classic", - (name, settings, indexSettings, scriptService) -> new ClassicSimilarityProvider(name, settings, indexSettings)); - buildIn.put("BM25", - (name, settings, indexSettings, scriptService) -> new BM25SimilarityProvider(name, settings, indexSettings)); - buildIn.put("DFR", + + Map builtIn = new HashMap<>(defaults); + builtIn.put("DFR", (name, settings, indexSettings, scriptService) -> new DFRSimilarityProvider(name, settings, indexSettings)); - buildIn.put("IB", + builtIn.put("IB", (name, 
settings, indexSettings, scriptService) -> new IBSimilarityProvider(name, settings, indexSettings)); - buildIn.put("LMDirichlet", + builtIn.put("LMDirichlet", (name, settings, indexSettings, scriptService) -> new LMDirichletSimilarityProvider(name, settings, indexSettings)); - buildIn.put("LMJelinekMercer", + builtIn.put("LMJelinekMercer", (name, settings, indexSettings, scriptService) -> new LMJelinekMercerSimilarityProvider(name, settings, indexSettings)); - buildIn.put("DFI", + builtIn.put("DFI", (name, settings, indexSettings, scriptService) -> new DFISimilarityProvider(name, settings, indexSettings)); - buildIn.put("scripted", ScriptedSimilarityProvider::new); + builtIn.put("scripted", ScriptedSimilarityProvider::new); DEFAULTS = Collections.unmodifiableMap(defaults); - BUILT_IN = Collections.unmodifiableMap(buildIn); + BUILT_IN = Collections.unmodifiableMap(builtIn); } public SimilarityService(IndexSettings indexSettings, ScriptService scriptService, From 670849894f2b1e763f15e537f0b4460f997d5a92 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 13 Sep 2017 08:17:16 -0400 Subject: [PATCH 18/67] Ensure module is bundled before installing in tests This commit adds a dependency to the install module task on the task that builds the module. This is needed for standalone integration tests that require other modules to be installed. Without this, we do not have a guarantee that the module is bundled. 
--- .../org/elasticsearch/gradle/test/ClusterFormationTasks.groovy | 1 + 1 file changed, 1 insertion(+) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 14e2ca25e63fb..217ecb4ed90fb 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -489,6 +489,7 @@ class ClusterFormationTasks { } Copy installModule = project.tasks.create(name, Copy.class) installModule.dependsOn(setup) + installModule.dependsOn(module.tasks.bundlePlugin) installModule.into(new File(node.homeDir, "modules/${module.name}")) installModule.from({ project.zipTree(module.tasks.bundlePlugin.outputs.files.singleFile) }) return installModule From 454cfc2ceab7471315df57cbd4aaf4f6addcb09b Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 13 Sep 2017 15:26:33 +0200 Subject: [PATCH 19/67] More efficient encoding of range fields. (#26470) This PR removes the vInt that precedes every value in order to know how long they are. Instead the query takes an enum that tells how to compute the length of values: for fixed-length data (ip addresses, double, float) the length is a constant while longs and integers use a variable-length representation that allows the length to be computed from the encoded values. Also the encoding of ints/longs was made a bit more efficient in order not to waste 3 bits in the header. As a consequence, values between -8 and 7 can now be encoded on 1 byte and values between -2048 and 2047 can now be encoded on 2 bytes or less. 
Closes #26443 --- .../queries/BinaryDocValuesRangeQuery.java | 71 ++++++-- .../index/mapper/BinaryRangeUtil.java | 153 ++++++++++-------- .../index/mapper/RangeFieldMapper.java | 93 +++++++---- .../index/mapper/BinaryRangeUtilTests.java | 87 ++++++++-- 4 files changed, 273 insertions(+), 131 deletions(-) diff --git a/core/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java b/core/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java index c8f78ab616d3f..f5d86849e56d1 100644 --- a/core/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java +++ b/core/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java @@ -37,15 +37,18 @@ public final class BinaryDocValuesRangeQuery extends Query { private final String fieldName; private final QueryType queryType; + private final LengthType lengthType; private final BytesRef from; private final BytesRef to; private final Object originalFrom; private final Object originalTo; - public BinaryDocValuesRangeQuery(String fieldName, QueryType queryType, BytesRef from, BytesRef to, + public BinaryDocValuesRangeQuery(String fieldName, QueryType queryType, LengthType lengthType, + BytesRef from, BytesRef to, Object originalFrom, Object originalTo) { this.fieldName = fieldName; this.queryType = queryType; + this.lengthType = lengthType; this.from = from; this.to = to; this.originalFrom = originalFrom; @@ -66,29 +69,34 @@ public Scorer scorer(LeafReaderContext context) throws IOException { final TwoPhaseIterator iterator = new TwoPhaseIterator(values) { ByteArrayDataInput in = new ByteArrayDataInput(); - BytesRef otherFrom = new BytesRef(16); - BytesRef otherTo = new BytesRef(16); + BytesRef otherFrom = new BytesRef(); + BytesRef otherTo = new BytesRef(); @Override public boolean matches() throws IOException { BytesRef encodedRanges = values.binaryValue(); in.reset(encodedRanges.bytes, encodedRanges.offset, encodedRanges.length); int numRanges = in.readVInt(); + final byte[] 
bytes = encodedRanges.bytes; + otherFrom.bytes = bytes; + otherTo.bytes = bytes; + int offset = in.getPosition(); for (int i = 0; i < numRanges; i++) { - otherFrom.length = in.readVInt(); - otherFrom.bytes = encodedRanges.bytes; - otherFrom.offset = in.getPosition(); - in.skipBytes(otherFrom.length); + int length = lengthType.readLength(bytes, offset); + otherFrom.offset = offset; + otherFrom.length = length; + offset += length; - otherTo.length = in.readVInt(); - otherTo.bytes = encodedRanges.bytes; - otherTo.offset = in.getPosition(); - in.skipBytes(otherTo.length); + length = lengthType.readLength(bytes, offset); + otherTo.offset = offset; + otherTo.length = length; + offset += length; if (queryType.matches(from, to, otherFrom, otherTo)) { return true; } } + assert offset == encodedRanges.offset + encodedRanges.length; return false; } @@ -114,13 +122,14 @@ public boolean equals(Object o) { BinaryDocValuesRangeQuery that = (BinaryDocValuesRangeQuery) o; return Objects.equals(fieldName, that.fieldName) && queryType == that.queryType && + lengthType == that.lengthType && Objects.equals(from, that.from) && Objects.equals(to, that.to); } @Override public int hashCode() { - return Objects.hash(getClass(), fieldName, queryType, from, to); + return Objects.hash(getClass(), fieldName, queryType, lengthType, from, to); } public enum QueryType { @@ -161,4 +170,42 @@ boolean matches(BytesRef from, BytesRef to, BytesRef otherFrom, BytesRef otherTo } + public enum LengthType { + FIXED_4 { + @Override + int readLength(byte[] bytes, int offset) { + return 4; + } + }, + FIXED_8 { + @Override + int readLength(byte[] bytes, int offset) { + return 8; + } + }, + FIXED_16 { + @Override + int readLength(byte[] bytes, int offset) { + return 16; + } + }, + VARIABLE { + @Override + int readLength(byte[] bytes, int offset) { + // the first bit encodes the sign and the next 4 bits encode the number + // of additional bytes + int token = Byte.toUnsignedInt(bytes[offset]); + int length = 
(token >>> 3) & 0x0f; + if ((token & 0x80) == 0) { + length = 0x0f - length; + } + return 1 + length; + } + }; + + /** + * Return the length of the value that starts at {@code offset} in {@code bytes}. + */ + abstract int readLength(byte[] bytes, int offset); + } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java b/core/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java index e2b618bc222e7..384ab24a73bf6 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java @@ -18,11 +18,14 @@ */ package org.elasticsearch.index.mapper; +import org.apache.lucene.document.HalfFloatPoint; import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.NumericUtils; import java.io.IOException; import java.util.ArrayList; +import java.util.Comparator; import java.util.List; import java.util.Set; @@ -32,28 +35,17 @@ enum BinaryRangeUtil { static BytesRef encodeLongRanges(Set ranges) throws IOException { List sortedRanges = new ArrayList<>(ranges); - sortedRanges.sort((r1, r2) -> { - long r1From = ((Number) r1.from).longValue(); - long r2From = ((Number) r2.from).longValue(); - int cmp = Long.compare(r1From, r2From); - if (cmp != 0) { - return cmp; - } else { - long r1To = ((Number) r1.from).longValue(); - long r2To = ((Number) r2.from).longValue(); - return Long.compare(r1To, r2To); - } - }); + Comparator fromComparator = Comparator.comparingLong(range -> ((Number) range.from).longValue()); + Comparator toComparator = Comparator.comparingLong(range -> ((Number) range.to).longValue()); + sortedRanges.sort(fromComparator.thenComparing(toComparator)); - final byte[] encoded = new byte[5 + ((5 + 9) * 2) * sortedRanges.size()]; + final byte[] encoded = new byte[5 + (9 * 2) * sortedRanges.size()]; ByteArrayDataOutput out = new ByteArrayDataOutput(encoded); 
out.writeVInt(sortedRanges.size()); for (RangeFieldMapper.Range range : sortedRanges) { - byte[] encodedFrom = encode(((Number) range.from).longValue()); - out.writeVInt(encodedFrom.length); + byte[] encodedFrom = encodeLong(((Number) range.from).longValue()); out.writeBytes(encodedFrom, encodedFrom.length); - byte[] encodedTo = encode(((Number) range.to).longValue()); - out.writeVInt(encodedTo.length); + byte[] encodedTo = encodeLong(((Number) range.to).longValue()); out.writeBytes(encodedTo, encodedTo.length); } return new BytesRef(encoded, 0, out.getPosition()); @@ -61,38 +53,59 @@ static BytesRef encodeLongRanges(Set ranges) throws IOEx static BytesRef encodeDoubleRanges(Set ranges) throws IOException { List sortedRanges = new ArrayList<>(ranges); - sortedRanges.sort((r1, r2) -> { - double r1From = ((Number) r1.from).doubleValue(); - double r2From = ((Number) r2.from).doubleValue(); - int cmp = Double.compare(r1From, r2From); - if (cmp != 0) { - return cmp; - } else { - double r1To = ((Number) r1.from).doubleValue(); - double r2To = ((Number) r2.from).doubleValue(); - return Double.compare(r1To, r2To); - } - }); + Comparator fromComparator = Comparator.comparingDouble(range -> ((Number) range.from).doubleValue()); + Comparator toComparator = Comparator.comparingDouble(range -> ((Number) range.to).doubleValue()); + sortedRanges.sort(fromComparator.thenComparing(toComparator)); - final byte[] encoded = new byte[5 + ((5 + 9) * 2) * sortedRanges.size()]; + final byte[] encoded = new byte[5 + (8 * 2) * sortedRanges.size()]; ByteArrayDataOutput out = new ByteArrayDataOutput(encoded); out.writeVInt(sortedRanges.size()); for (RangeFieldMapper.Range range : sortedRanges) { - byte[] encodedFrom = BinaryRangeUtil.encode(((Number) range.from).doubleValue()); - out.writeVInt(encodedFrom.length); + byte[] encodedFrom = encodeDouble(((Number) range.from).doubleValue()); out.writeBytes(encodedFrom, encodedFrom.length); - byte[] encodedTo = BinaryRangeUtil.encode(((Number) 
range.to).doubleValue()); - out.writeVInt(encodedTo.length); + byte[] encodedTo = encodeDouble(((Number) range.to).doubleValue()); out.writeBytes(encodedTo, encodedTo.length); } return new BytesRef(encoded, 0, out.getPosition()); } + static BytesRef encodeFloatRanges(Set ranges) throws IOException { + List sortedRanges = new ArrayList<>(ranges); + Comparator fromComparator = Comparator.comparingDouble(range -> ((Number) range.from).floatValue()); + Comparator toComparator = Comparator.comparingDouble(range -> ((Number) range.to).floatValue()); + sortedRanges.sort(fromComparator.thenComparing(toComparator)); + + final byte[] encoded = new byte[5 + (4 * 2) * sortedRanges.size()]; + ByteArrayDataOutput out = new ByteArrayDataOutput(encoded); + out.writeVInt(sortedRanges.size()); + for (RangeFieldMapper.Range range : sortedRanges) { + byte[] encodedFrom = encodeFloat(((Number) range.from).floatValue()); + out.writeBytes(encodedFrom, encodedFrom.length); + byte[] encodedTo = encodeFloat(((Number) range.to).floatValue()); + out.writeBytes(encodedTo, encodedTo.length); + } + return new BytesRef(encoded, 0, out.getPosition()); + } + + static byte[] encodeDouble(double number) { + byte[] encoded = new byte[8]; + NumericUtils.longToSortableBytes(NumericUtils.doubleToSortableLong(number), encoded, 0); + return encoded; + } + + static byte[] encodeFloat(float number) { + byte[] encoded = new byte[4]; + NumericUtils.intToSortableBytes(NumericUtils.floatToSortableInt(number), encoded, 0); + return encoded; + } + /** * Encodes the specified number of type long in a variable-length byte format. * The byte format preserves ordering, which means the returned byte array can be used for comparing as is. + * The first bit stores the sign and the 4 subsequent bits encode the number of bytes that are used to + * represent the long value, in addition to the first one. 
*/ - static byte[] encode(long number) { + static byte[] encodeLong(long number) { int sign = 1; // means positive if (number < 0) { number = -1 - number; @@ -101,46 +114,48 @@ static byte[] encode(long number) { return encode(number, sign); } - /** - * Encodes the specified number of type double in a variable-length byte format. - * The byte format preserves ordering, which means the returned byte array can be used for comparing as is. - */ - static byte[] encode(double number) { - long l; - int sign; - if (number < 0.0) { - l = Double.doubleToRawLongBits(-0d - number); - sign = 0; - } else { - l = Double.doubleToRawLongBits(number); - sign = 1; // means positive - } - return encode(l, sign); - } - private static byte[] encode(long l, int sign) { assert l >= 0; - int bits = 64 - Long.numberOfLeadingZeros(l); - int numBytes = (bits + 7) / 8; // between 0 and 8 - byte[] encoded = new byte[1 + numBytes]; - // encode the sign first to make sure positive values compare greater than negative values - // and then the number of bytes, to make sure that large values compare greater than low values - if (sign > 0) { - encoded[0] = (byte) ((sign << 4) | numBytes); - } else { - encoded[0] = (byte) ((sign << 4) | (8 - numBytes)); + // the header is formed of: + // - 1 bit for the sign + // - 4 bits for the number of additional bytes + // - up to 3 bits of the value + // additional bytes are data bytes + + int numBits = 64 - Long.numberOfLeadingZeros(l); + int numAdditionalBytes = (numBits + 7 - 3) / 8; + + byte[] encoded = new byte[1 + numAdditionalBytes]; + + // write data bytes + int i = encoded.length; + while (numBits > 0) { + int index = --i; + assert index > 0 || numBits <= 3; // byte 0 can't encode more than 3 bits + encoded[index] = (byte) l; + l >>>= 8; + numBits -= 8; } - for (int b = 0; b < numBytes; ++b) { - if (sign == 1) { - encoded[encoded.length - 1 - b] = (byte) (l >>> (8 * b)); - } else if (sign == 0) { - encoded[encoded.length - 1 - b] = (byte) (0xFF - ((l 
>>> (8 * b)) & 0xFF)); - } else { - throw new AssertionError(); + assert Byte.toUnsignedInt(encoded[0]) <= 0x07; + assert encoded.length == 1 || encoded[0] != 0 || Byte.toUnsignedInt(encoded[1]) > 0x07; + + if (sign == 0) { + // reverse the order + for (int j = 0; j < encoded.length; ++j) { + encoded[j] = (byte) ~Byte.toUnsignedInt(encoded[j]); } + // the first byte only uses 3 bits, we need the 5 upper bits for the header + encoded[0] &= 0x07; + } + + // write the header + encoded[0] |= sign << 7; + if (sign > 0) { + encoded[0] |= numAdditionalBytes << 3; + } else { + encoded[0] |= (15 - numAdditionalBytes) << 3; } return encoded; } - } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index ed77e0a97aa1f..c243858cd9155 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -477,12 +477,10 @@ public BytesRef encodeRanges(Set ranges) throws IOException { ByteArrayDataOutput out = new ByteArrayDataOutput(encoded); out.writeVInt(ranges.size()); for (Range range : ranges) { - out.writeVInt(16); InetAddress fromValue = (InetAddress) range.from; byte[] encodedFromValue = InetAddressPoint.encode(fromValue); out.writeBytes(encodedFromValue, 0, encodedFromValue.length); - out.writeVInt(16); InetAddress toValue = (InetAddress) range.to; byte[] encodedToValue = InetAddressPoint.encode(toValue); out.writeBytes(encodedToValue, 0, encodedToValue.length); @@ -491,10 +489,19 @@ public BytesRef encodeRanges(Set ranges) throws IOException { } @Override - BytesRef[] encodeRange(Object from, Object to) { - BytesRef encodedFrom = new BytesRef(InetAddressPoint.encode((InetAddress) from)); - BytesRef encodedTo = new BytesRef(InetAddressPoint.encode((InetAddress) to)); - return new BytesRef[]{encodedFrom, encodedTo}; + public Query dvRangeQuery(String field, QueryType 
queryType, Object from, Object to, boolean includeFrom, boolean includeTo) { + if (includeFrom == false) { + from = nextUp(from); + } + + if (includeTo == false) { + to = nextDown(to); + } + + byte[] encodedFrom = InetAddressPoint.encode((InetAddress) from); + byte[] encodedTo = InetAddressPoint.encode((InetAddress) to); + return new BinaryDocValuesRangeQuery(field, queryType, BinaryDocValuesRangeQuery.LengthType.FIXED_16, + new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to); } @Override @@ -565,8 +572,8 @@ public BytesRef encodeRanges(Set ranges) throws IOException { } @Override - BytesRef[] encodeRange(Object from, Object to) { - return LONG.encodeRange(from, to); + public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) { + return LONG.dvRangeQuery(field, queryType, from, to, includeFrom, includeTo); } @Override @@ -620,12 +627,23 @@ public Float nextDown(Object value) { @Override public BytesRef encodeRanges(Set ranges) throws IOException { - return DOUBLE.encodeRanges(ranges); + return BinaryRangeUtil.encodeFloatRanges(ranges); } @Override - BytesRef[] encodeRange(Object from, Object to) { - return DOUBLE.encodeRange(((Number) from).floatValue(), ((Number) to).floatValue()); + public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) { + if (includeFrom == false) { + from = nextUp(from); + } + + if (includeTo == false) { + to = nextDown(to); + } + + byte[] encodedFrom = BinaryRangeUtil.encodeFloat((Float) from); + byte[] encodedTo = BinaryRangeUtil.encodeFloat((Float) to); + return new BinaryDocValuesRangeQuery(field, queryType, BinaryDocValuesRangeQuery.LengthType.FIXED_4, + new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to); } @Override @@ -675,10 +693,19 @@ public BytesRef encodeRanges(Set ranges) throws IOException { } @Override - BytesRef[] encodeRange(Object from, Object to) { - byte[] fromValue = 
BinaryRangeUtil.encode(((Number) from).doubleValue()); - byte[] toValue = BinaryRangeUtil.encode(((Number) to).doubleValue()); - return new BytesRef[]{new BytesRef(fromValue), new BytesRef(toValue)}; + public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) { + if (includeFrom == false) { + from = nextUp(from); + } + + if (includeTo == false) { + to = nextDown(to); + } + + byte[] encodedFrom = BinaryRangeUtil.encodeDouble((Double) from); + byte[] encodedTo = BinaryRangeUtil.encodeDouble((Double) to); + return new BinaryDocValuesRangeQuery(field, queryType, BinaryDocValuesRangeQuery.LengthType.FIXED_8, + new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to); } @Override @@ -730,8 +757,8 @@ public BytesRef encodeRanges(Set ranges) throws IOException { } @Override - BytesRef[] encodeRange(Object from, Object to) { - return LONG.encodeRange(from, to); + public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) { + return LONG.dvRangeQuery(field, queryType, from, to, includeFrom, includeTo); } @Override @@ -778,10 +805,19 @@ public BytesRef encodeRanges(Set ranges) throws IOException { } @Override - BytesRef[] encodeRange(Object from, Object to) { - byte[] encodedFrom = BinaryRangeUtil.encode(((Number) from).longValue()); - byte[] encodedTo = BinaryRangeUtil.encode(((Number) to).longValue()); - return new BytesRef[]{new BytesRef(encodedFrom), new BytesRef(encodedTo)}; + public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) { + if (includeFrom == false) { + from = nextUp(from); + } + + if (includeTo == false) { + to = nextDown(to); + } + + byte[] encodedFrom = BinaryRangeUtil.encodeLong(((Number) from).longValue()); + byte[] encodedTo = BinaryRangeUtil.encodeLong(((Number) to).longValue()); + return new BinaryDocValuesRangeQuery(field, queryType, 
BinaryDocValuesRangeQuery.LengthType.VARIABLE, + new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to); } @Override @@ -897,19 +933,8 @@ public Query rangeQuery(String field, boolean hasDocValues, Object from, Object // rounded up via parseFrom and parseTo methods. public abstract BytesRef encodeRanges(Set ranges) throws IOException; - public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) { - if (includeFrom == false) { - from = nextUp(from); - } - - if (includeTo == false) { - to = nextDown(to); - } - BytesRef[] range = encodeRange(from, to); - return new BinaryDocValuesRangeQuery(field, queryType, range[0], range[1], from, to); - } - - abstract BytesRef[] encodeRange(Object from, Object to); + public abstract Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, + boolean includeFrom, boolean includeTo); public final String name; private final NumberType numberType; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/BinaryRangeUtilTests.java b/core/src/test/java/org/elasticsearch/index/mapper/BinaryRangeUtilTests.java index 8a4e6945ffc36..20d4af1f0b600 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/BinaryRangeUtilTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/BinaryRangeUtilTests.java @@ -24,11 +24,11 @@ public class BinaryRangeUtilTests extends ESTestCase { public void testBasics() { - BytesRef encoded1 = new BytesRef(BinaryRangeUtil.encode(Long.MIN_VALUE)); - BytesRef encoded2 = new BytesRef(BinaryRangeUtil.encode(-1L)); - BytesRef encoded3 = new BytesRef(BinaryRangeUtil.encode(0L)); - BytesRef encoded4 = new BytesRef(BinaryRangeUtil.encode(1L)); - BytesRef encoded5 = new BytesRef(BinaryRangeUtil.encode(Long.MAX_VALUE)); + BytesRef encoded1 = new BytesRef(BinaryRangeUtil.encodeLong(Long.MIN_VALUE)); + BytesRef encoded2 = new BytesRef(BinaryRangeUtil.encodeLong(-1L)); + BytesRef encoded3 = new 
BytesRef(BinaryRangeUtil.encodeLong(0L)); + BytesRef encoded4 = new BytesRef(BinaryRangeUtil.encodeLong(1L)); + BytesRef encoded5 = new BytesRef(BinaryRangeUtil.encodeLong(Long.MAX_VALUE)); assertTrue(encoded1.compareTo(encoded2) < 0); assertTrue(encoded2.compareTo(encoded1) > 0); @@ -39,11 +39,12 @@ public void testBasics() { assertTrue(encoded4.compareTo(encoded5) < 0); assertTrue(encoded5.compareTo(encoded4) > 0); - encoded1 = new BytesRef(BinaryRangeUtil.encode(Double.NEGATIVE_INFINITY)); - encoded2 = new BytesRef(BinaryRangeUtil.encode(-1D)); - encoded3 = new BytesRef(BinaryRangeUtil.encode(0D)); - encoded4 = new BytesRef(BinaryRangeUtil.encode(1D)); - encoded5 = new BytesRef(BinaryRangeUtil.encode(Double.POSITIVE_INFINITY)); + encoded1 = new BytesRef(BinaryRangeUtil.encodeDouble(Double.NEGATIVE_INFINITY)); + encoded2 = new BytesRef(BinaryRangeUtil.encodeDouble(-1D)); + encoded3 = new BytesRef(BinaryRangeUtil.encodeDouble(-0D)); + encoded4 = new BytesRef(BinaryRangeUtil.encodeDouble(0D)); + encoded5 = new BytesRef(BinaryRangeUtil.encodeDouble(1D)); + BytesRef encoded6 = new BytesRef(BinaryRangeUtil.encodeDouble(Double.POSITIVE_INFINITY)); assertTrue(encoded1.compareTo(encoded2) < 0); assertTrue(encoded2.compareTo(encoded1) > 0); @@ -53,15 +54,35 @@ public void testBasics() { assertTrue(encoded4.compareTo(encoded3) > 0); assertTrue(encoded4.compareTo(encoded5) < 0); assertTrue(encoded5.compareTo(encoded4) > 0); + assertTrue(encoded5.compareTo(encoded6) < 0); + assertTrue(encoded6.compareTo(encoded5) > 0); + + encoded1 = new BytesRef(BinaryRangeUtil.encodeFloat(Float.NEGATIVE_INFINITY)); + encoded2 = new BytesRef(BinaryRangeUtil.encodeFloat(-1F)); + encoded3 = new BytesRef(BinaryRangeUtil.encodeFloat(-0F)); + encoded4 = new BytesRef(BinaryRangeUtil.encodeFloat(0F)); + encoded5 = new BytesRef(BinaryRangeUtil.encodeFloat(1F)); + encoded6 = new BytesRef(BinaryRangeUtil.encodeFloat(Float.POSITIVE_INFINITY)); + + assertTrue(encoded1.compareTo(encoded2) < 0); + 
assertTrue(encoded2.compareTo(encoded1) > 0); + assertTrue(encoded2.compareTo(encoded3) < 0); + assertTrue(encoded3.compareTo(encoded2) > 0); + assertTrue(encoded3.compareTo(encoded4) < 0); + assertTrue(encoded4.compareTo(encoded3) > 0); + assertTrue(encoded4.compareTo(encoded5) < 0); + assertTrue(encoded5.compareTo(encoded4) > 0); + assertTrue(encoded5.compareTo(encoded6) < 0); + assertTrue(encoded6.compareTo(encoded5) > 0); } public void testEncode_long() { int iters = randomIntBetween(32, 1024); for (int i = 0; i < iters; i++) { long number1 = randomLong(); - BytesRef encodedNumber1 = new BytesRef(BinaryRangeUtil.encode(number1)); - long number2 = randomLong(); - BytesRef encodedNumber2 = new BytesRef(BinaryRangeUtil.encode(number2)); + BytesRef encodedNumber1 = new BytesRef(BinaryRangeUtil.encodeLong(number1)); + long number2 = randomBoolean() ? number1 + 1 : randomLong(); + BytesRef encodedNumber2 = new BytesRef(BinaryRangeUtil.encodeLong(number2)); int cmp = normalize(Long.compare(number1, number2)); assertEquals(cmp, normalize(encodedNumber1.compareTo(encodedNumber2))); @@ -70,14 +91,48 @@ public void testEncode_long() { } } + public void testVariableLengthEncoding() { + for (int i = -8; i <= 7; ++i) { + assertEquals(1, BinaryRangeUtil.encodeLong(i).length); + } + for (int i = -2048; i <= 2047; ++i) { + if (i < -8 ||i > 7) { + assertEquals(2, BinaryRangeUtil.encodeLong(i).length); + } + } + assertEquals(3, BinaryRangeUtil.encodeLong(-2049).length); + assertEquals(3, BinaryRangeUtil.encodeLong(2048).length); + assertEquals(9, BinaryRangeUtil.encodeLong(Long.MIN_VALUE).length); + assertEquals(9, BinaryRangeUtil.encodeLong(Long.MAX_VALUE).length); + } + public void testEncode_double() { int iters = randomIntBetween(32, 1024); for (int i = 0; i < iters; i++) { double number1 = randomDouble(); - BytesRef encodedNumber1 = new BytesRef(BinaryRangeUtil.encode(number1)); - double number2 = randomDouble(); - BytesRef encodedNumber2 = new 
BytesRef(BinaryRangeUtil.encode(number2)); + BytesRef encodedNumber1 = new BytesRef(BinaryRangeUtil.encodeDouble(number1)); + double number2 = randomBoolean() ? Math.nextUp(number1) : randomDouble(); + BytesRef encodedNumber2 = new BytesRef(BinaryRangeUtil.encodeDouble(number2)); + + assertEquals(8, encodedNumber1.length); + assertEquals(8, encodedNumber2.length); + int cmp = normalize(Double.compare(number1, number2)); + assertEquals(cmp, normalize(encodedNumber1.compareTo(encodedNumber2))); + cmp = normalize(Double.compare(number2, number1)); + assertEquals(cmp, normalize(encodedNumber2.compareTo(encodedNumber1))); + } + } + + public void testEncode_Float() { + int iters = randomIntBetween(32, 1024); + for (int i = 0; i < iters; i++) { + float number1 = randomFloat(); + BytesRef encodedNumber1 = new BytesRef(BinaryRangeUtil.encodeFloat(number1)); + float number2 = randomBoolean() ? Math.nextUp(number1) : randomFloat(); + BytesRef encodedNumber2 = new BytesRef(BinaryRangeUtil.encodeFloat(number2)); + assertEquals(4, encodedNumber1.length); + assertEquals(4, encodedNumber2.length); int cmp = normalize(Double.compare(number1, number2)); assertEquals(cmp, normalize(encodedNumber1.compareTo(encodedNumber2))); cmp = normalize(Double.compare(number2, number1)); From c709b8d6ac7ef50a42c3f69f07132d2b0515f385 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Wed, 13 Sep 2017 16:09:00 +0200 Subject: [PATCH 20/67] Fix incomplete sentences in parent-join docs (#26623) * Fix incomplete sentences in parent-join docs Closes #26590 --- docs/reference/mapping/types/parent-join.asciidoc | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/reference/mapping/types/parent-join.asciidoc b/docs/reference/mapping/types/parent-join.asciidoc index 048e866d03b48..ad33205650d5b 100644 --- a/docs/reference/mapping/types/parent-join.asciidoc +++ b/docs/reference/mapping/types/parent-join.asciidoc @@ -32,7 +32,7 @@ PUT my_index To index a document with a join, the name 
of the relation and the optional parent of the document must be provided in the `source`. -For instance the following creates two parent documents in the `question` context: +For instance the following example creates two `parent` documents in the `question` context: [source,js] -------------------------------------------------- @@ -85,8 +85,7 @@ must be added in the `_source`. WARNING: It is required to index the lineage of a parent in the same shard so you must always route child documents using their greater parent id. -For instance the following index two children documents pointing to the same parent `1` -with a `routing` value equals to the `id` of the parent: +For instance the following example shows how to index two `child` documents: [source,js] -------------------------------------------------- @@ -111,7 +110,7 @@ PUT my_index/doc/4?routing=1&refresh // CONSOLE // TEST[continued] -<1> This child document must be on the same shard than its parent +<1> The routing value is mandatory because parent and child documents must be indexed on the same shard <2> `answer` is the name of the join for this document <3> The parent id of this child document From 64770b3fbdd607298f98314e8529840eb309444b Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 13 Sep 2017 17:00:52 +0200 Subject: [PATCH 21/67] Remove MapperService#dynamic. (#26603) We ignore it as of 6.0 and forbid it as of 7.0. 
--- .../index/mapper/MapperService.java | 33 +++++-------------- .../action/support/AutoCreateIndexTests.java | 10 ------ .../mapper/DynamicMappingVersionTests.java | 15 ++------- 3 files changed, 10 insertions(+), 48 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index c9851ed7a1d05..9d46afac3cb23 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -50,7 +50,6 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.InvalidTypeNameException; -import org.elasticsearch.indices.TypeMissingException; import org.elasticsearch.indices.mapper.MapperRegistry; import java.io.Closeable; @@ -98,8 +97,10 @@ public enum MergeReason { public static final Setting INDEX_MAPPING_DEPTH_LIMIT_SETTING = Setting.longSetting("index.mapping.depth.limit", 20L, 1, Property.Dynamic, Property.IndexScope); public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true; + @Deprecated public static final Setting INDEX_MAPPER_DYNAMIC_SETTING = - Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, Property.Dynamic, Property.IndexScope); + Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, + Property.Dynamic, Property.IndexScope, Property.Deprecated); private static ObjectHashSet META_FIELDS = ObjectHashSet.from( "_uid", "_id", "_type", "_parent", "_routing", "_index", @@ -110,11 +111,6 @@ public enum MergeReason { private final IndexAnalyzers indexAnalyzers; - /** - * Will create types automatically if they do not exists in the mapping definition yet - */ - private final boolean dynamic; - private volatile String defaultMappingSource; private volatile Map mappers = emptyMap(); @@ -148,24 +144,15 @@ public MapperService(IndexSettings 
indexSettings, IndexAnalyzers indexAnalyzers, this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(indexAnalyzers.getDefaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer()); this.mapperRegistry = mapperRegistry; - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1)) { - if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings())) { - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { - throw new IllegalArgumentException("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " was removed after version 6.0.0"); - } else { - DEPRECATION_LOGGER.deprecated("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " is deprecated since indices may not have more than one type anymore."); - } - } - this.dynamic = INDEX_MAPPER_DYNAMIC_DEFAULT; - } else { - this.dynamic = this.indexSettings.getValue(INDEX_MAPPER_DYNAMIC_SETTING); + if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings()) && + indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + throw new IllegalArgumentException("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " was removed after version 6.0.0"); } + defaultMappingSource = "{\"_default_\":{}}"; if (logger.isTraceEnabled()) { - logger.trace("using dynamic[{}], default mapping source[{}]", dynamic, defaultMappingSource); - } else if (logger.isDebugEnabled()) { - logger.debug("using dynamic[{}]", dynamic); + logger.trace("default mapping source[{}]", defaultMappingSource); } } @@ -739,10 +726,6 @@ public DocumentMapperForType documentMapperWithAutoCreate(String type) { if (mapper != null) { return new DocumentMapperForType(mapper, null); } - if (!dynamic) { - throw new TypeMissingException(index(), - new IllegalStateException("trying to auto create mapping, but dynamic mapping is disabled"), type); - } mapper = parse(type, null, true); return new DocumentMapperForType(mapper, mapper.mapping()); } diff --git 
a/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java b/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java index 159be84de0791..a0c3a8d1f4e60 100644 --- a/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java @@ -108,16 +108,6 @@ public void testExistingIndex() { buildClusterState("index1", "index2", "index3")), equalTo(false)); } - public void testDynamicMappingDisabled() { - Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom(true, - randomAlphaOfLengthBetween(1, 10))) - .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false).build(); - AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings); - IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> - autoCreateIndex.shouldAutoCreate(randomAlphaOfLengthBetween(1, 10), buildClusterState())); - assertEquals("no such index and [index.mapper.dynamic] is [false]", e.getMessage()); - } - public void testAutoCreationPatternEnabled() { Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom("+index*", "index*")) .build(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingVersionTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingVersionTests.java index 94af6c5454493..37c887401f24a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingVersionTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingVersionTests.java @@ -19,12 +19,10 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import 
org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.indices.TypeMissingException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -65,16 +63,7 @@ public void testDynamicMappingSettingRemoval() { .build(); Exception e = expectThrows(IllegalArgumentException.class, () -> createIndex("test-index", settings)); assertEquals(e.getMessage(), "Setting index.mapper.dynamic was removed after version 6.0.0"); + assertSettingDeprecationsAndWarnings(new Setting[] { MapperService.INDEX_MAPPER_DYNAMIC_SETTING }); } - public void testDynamicMappingDisablePreEs6() { - Settings settingsPreEs6 = Settings.builder() - .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) - .build(); - MapperService preEs6MapperService = createIndex("pre-es6-index", settingsPreEs6).mapperService(); - Exception e = expectThrows(TypeMissingException.class, - () -> preEs6MapperService.documentMapperWithAutoCreate("pre-es6-type")); - assertEquals(e.getMessage(), "type[pre-es6-type] missing"); - } } From 027c555c9bddebcc866407a4c2235045dd8d6e42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 13 Sep 2017 17:22:16 +0200 Subject: [PATCH 22/67] Add soft limit on allowed number of script fields in request (#26598) Requesting to many script_fields in a search request can be costly because of script execution. This change introduces a soft limit on the number of script fields that are allowed per request. The setting can be changed per index using the index.max_script_fields setting. 
Relates to #26390 --- .../common/settings/IndexScopedSettings.java | 1 + .../elasticsearch/index/IndexSettings.java | 23 ++++++++ .../elasticsearch/search/SearchService.java | 7 +++ .../index/IndexSettingsTests.java | 16 ++++++ .../search/SearchServiceTests.java | 54 ++++++++++++++++++- docs/reference/index-modules.asciidoc | 5 ++ .../rest-api-spec/test/search/30_limits.yml | 26 +++++++++ 7 files changed, 131 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 92d9cd96a71c7..9d4d30b066f1f 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -112,6 +112,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.MAX_RESULT_WINDOW_SETTING, IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING, IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING, + IndexSettings.MAX_SCRIPT_FIELDS_SETTING, IndexSettings.MAX_RESCORE_WINDOW_SETTING, IndexSettings.MAX_ADJACENCY_MATRIX_FILTERS_SETTING, IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING, diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index 7899136e68dae..20c23e3becddc 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -98,6 +98,15 @@ public final class IndexSettings { */ public static final Setting MAX_INNER_RESULT_WINDOW_SETTING = Setting.intSetting("index.max_inner_result_window", 100, 1, Property.Dynamic, Property.IndexScope); + + /** + * Index setting describing the maximum value of allowed `script_fields`that can be retrieved + * per search request. 
The default maximum of 50 is defensive for the reason that retrieving + * script fields is a costly operation. + */ + public static final Setting MAX_SCRIPT_FIELDS_SETTING = + Setting.intSetting("index.max_script_fields", 32, 0, Property.Dynamic, Property.IndexScope); + /** * Index setting describing the maximum value of allowed `docvalue_fields`that can be retrieved * per search request. The default maximum of 100 is defensive for the reason that retrieving @@ -229,6 +238,7 @@ public final class IndexSettings { private volatile int maxAdjacencyMatrixFilters; private volatile int maxRescoreWindow; private volatile int maxDocvalueFields; + private volatile int maxScriptFields; private volatile boolean TTLPurgeDisabled; /** * The maximum number of refresh listeners allows on this shard. @@ -331,6 +341,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti maxAdjacencyMatrixFilters = scopedSettings.get(MAX_ADJACENCY_MATRIX_FILTERS_SETTING); maxRescoreWindow = scopedSettings.get(MAX_RESCORE_WINDOW_SETTING); maxDocvalueFields = scopedSettings.get(MAX_DOCVALUE_FIELDS_SEARCH_SETTING); + maxScriptFields = scopedSettings.get(MAX_SCRIPT_FIELDS_SETTING); TTLPurgeDisabled = scopedSettings.get(INDEX_TTL_DISABLE_PURGE_SETTING); maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD); maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL); @@ -361,6 +372,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(MAX_ADJACENCY_MATRIX_FILTERS_SETTING, this::setMaxAdjacencyMatrixFilters); scopedSettings.addSettingsUpdateConsumer(MAX_RESCORE_WINDOW_SETTING, this::setMaxRescoreWindow); scopedSettings.addSettingsUpdateConsumer(MAX_DOCVALUE_FIELDS_SEARCH_SETTING, this::setMaxDocvalueFields); + scopedSettings.addSettingsUpdateConsumer(MAX_SCRIPT_FIELDS_SETTING, this::setMaxScriptFields); scopedSettings.addSettingsUpdateConsumer(INDEX_WARMER_ENABLED_SETTING, 
this::setEnableWarmer); scopedSettings.addSettingsUpdateConsumer(INDEX_GC_DELETES_SETTING, this::setGCDeletes); scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, this::setTranslogFlushThresholdSize); @@ -628,6 +640,17 @@ private void setMaxDocvalueFields(int maxDocvalueFields) { this.maxDocvalueFields = maxDocvalueFields; } + /** + * Returns the maximum number of allowed script_fields to retrieve in a search request + */ + public int getMaxScriptFields() { + return this.maxScriptFields; + } + + private void setMaxScriptFields(int maxScriptFields) { + this.maxScriptFields = maxScriptFields; + } + /** * Returns the GC deletes cycle in milliseconds. */ diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 0b112a59c628c..f6dc8a0a39ba1 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -788,6 +788,13 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc } } if (source.scriptFields() != null) { + int maxAllowedScriptFields = context.mapperService().getIndexSettings().getMaxScriptFields(); + if (source.scriptFields().size() > maxAllowedScriptFields) { + throw new IllegalArgumentException( + "Trying to retrieve too many script_fields. Must be less than or equal to: [" + maxAllowedScriptFields + + "] but was [" + source.scriptFields().size() + "]. 
This limit can be set by changing the [" + + IndexSettings.MAX_SCRIPT_FIELDS_SETTING.getKey() + "] index level setting."); + } for (org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField field : source.scriptFields()) { SearchScript.Factory factory = scriptService.compile(field.script(), SearchScript.CONTEXT); SearchScript.LeafFactory searchScript = factory.newFactory(field.script().getParams(), context.lookup()); diff --git a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index bb147b8752efe..539c8d6939372 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -326,6 +326,22 @@ public void testMaxDocvalueFields() { assertEquals(IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.get(Settings.EMPTY).intValue(), settings.getMaxDocvalueFields()); } + public void testMaxScriptFields() { + IndexMetaData metaData = newIndexMeta("index", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexSettings.MAX_SCRIPT_FIELDS_SETTING.getKey(), 100).build()); + IndexSettings settings = new IndexSettings(metaData, Settings.EMPTY); + assertEquals(100, settings.getMaxScriptFields()); + settings.updateIndexMetaData( + newIndexMeta("index", Settings.builder().put(IndexSettings.MAX_SCRIPT_FIELDS_SETTING.getKey(), 20).build())); + assertEquals(20, settings.getMaxScriptFields()); + settings.updateIndexMetaData(newIndexMeta("index", Settings.EMPTY)); + assertEquals(IndexSettings.MAX_SCRIPT_FIELDS_SETTING.get(Settings.EMPTY).intValue(), settings.getMaxScriptFields()); + + metaData = newIndexMeta("index", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()); + settings = new IndexSettings(metaData, Settings.EMPTY); + assertEquals(IndexSettings.MAX_SCRIPT_FIELDS_SETTING.get(Settings.EMPTY).intValue(), 
settings.getMaxScriptFields()); + } + public void testMaxAdjacencyMatrixFiltersSetting() { IndexMetaData metaData = newIndexMeta("index", Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) diff --git a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 10af4b333ea8b..5d166aaa628ba 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -47,6 +47,10 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; @@ -60,11 +64,14 @@ import java.io.IOException; import java.util.Collection; +import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; import static java.util.Collections.singletonList; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -83,7 +90,19 @@ protected boolean resetNodeAfterTest() { @Override protected Collection> getPlugins() { - return pluginList(FailOnRewriteQueryPlugin.class); + return pluginList(FailOnRewriteQueryPlugin.class, CustomScriptPlugin.class); + } + + public static class CustomScriptPlugin extends MockScriptPlugin { + + static final String DUMMY_SCRIPT = 
"dummyScript"; + + @Override + protected Map, Object>> pluginScripts() { + return Collections.singletonMap(DUMMY_SCRIPT, vars -> { + return "dummy"; + }); + } } @Override @@ -290,6 +309,39 @@ searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_A } } + /** + * test that getting more than the allowed number of script_fields throws an exception + */ + public void testMaxScriptFieldsSearch() throws IOException { + createIndex("index"); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + // adding the maximum allowed number of script_fields to retrieve + int maxScriptFields = indexService.getIndexSettings().getMaxScriptFields(); + for (int i = 0; i < maxScriptFields; i++) { + searchSourceBuilder.scriptField("field" + i, + new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap())); + } + try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f), null)) { + assertNotNull(context); + searchSourceBuilder.scriptField("anotherScriptField", + new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap())); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f), null)); + assertEquals( + "Trying to retrieve too many script_fields. 
Must be less than or equal to: [" + maxScriptFields + "] but was [" + + (maxScriptFields + 1) + + "]. This limit can be set by changing the [index.max_script_fields] index level setting.", + ex.getMessage()); + } + } + public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin { @Override public List> getQueries() { diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 889f5a6b02e12..d42587eafafb3 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -139,6 +139,11 @@ specific index module: Defaults to `100`. Doc-value fields are costly since they might incur a per-field per-document seek. +`index.max_script_fields`:: + + The maximum number of `script_fields` that are allowed in a query. + Defaults to `32`. + `index.blocks.read_only`:: Set to `true` to make the index and index metadata read only, `false` to diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml index c8f0009658d22..b0b5e5ffede16 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml @@ -69,3 +69,29 @@ setup: query: match_all: {} docvalue_fields: ["one", "two", "three"] + +--- +"Script_fields size limit": + - skip: + version: " - 6.99.99" + reason: soft limit for script_fields only available as of 7.0.0 + + - do: + indices.create: + index: test_2 + body: + settings: + index.max_script_fields: 2 + + - do: + catch: /Trying to retrieve too many script_fields\. Must be less than or equal to[:] \[2\] but was \[3\]\. 
This limit can be set by changing the \[index.max_script_fields\] index level setting\./ + search: + index: test_2 + body: + query: + match_all: {} + script_fields: { + "test1" : { "script" : { "lang": "painless", "source": "1" }}, + "test2" : { "script" : { "lang": "painless", "source": "1" }}, + "test3" : { "script" : { "lang": "painless", "source": "1" }} + } From 7404221b55363197b4613b4ffb2d805136632272 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Wed, 13 Sep 2017 17:28:31 +0200 Subject: [PATCH 23/67] [Docs] Clarify size parameter in Completion Suggester doc (#26617) --- .../suggesters/completion-suggest.asciidoc | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 566a659279f60..5d8f5fa1cc5c2 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -156,9 +156,9 @@ POST music/_search?pretty { "suggest": { "song-suggest" : { - "prefix" : "nir", - "completion" : { - "field" : "suggest" + "prefix" : "nir", <1> + "completion" : { <2> + "field" : "suggest" <3> } } } @@ -167,6 +167,10 @@ POST music/_search?pretty // CONSOLE // TEST[continued] +<1> Prefix used to search for suggestions +<2> Type of suggestions +<3> Name of the field to search for suggestions in + returns this response: [source,js] @@ -218,14 +222,15 @@ filtering but using suggest on the `_search` endpoint does: [source,js] -------------------------------------------------- -POST music/_search?size=0 +POST music/_search { - "_source": "suggest", + "_source": "suggest", <1> "suggest": { "song-suggest" : { "prefix" : "nir", "completion" : { - "field" : "suggest" + "field" : "suggest", <2> + "size" : 5 <3> } } } @@ -234,6 +239,10 @@ POST music/_search?size=0 // CONSOLE // TEST[continued] +<1> Filter the source to return only the `suggest` field +<2> 
Name of the field to search for suggestions in +<3> Number of suggestions to return + Which should look like: [source,js] From 93da7720ff14ebb3a0edafa2fb920413e7c3c43e Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 13 Sep 2017 17:58:53 +0200 Subject: [PATCH 24/67] Move non-core mappers to a module. (#26549) Today we have all non-plugin mappers in core. I'd like to start moving those that neither map to json datatypes nor are very frequently used like `date` or `ip` to a module. This commit creates a new module called `mappers-extra` and moves the `scaled_float` and `token_count` mappers to it. I'd like to eventually move `range` fields there but it's more complicated due to their intimate relationship with range queries. Relates #10368 --- .../resources/checkstyle_suppressions.xml | 1 - .../index/mapper/CustomDocValuesField.java | 6 +- .../index/mapper/NumberFieldMapper.java | 83 +++-- .../index/query/RangeQueryBuilder.java | 1 - .../index/search/QueryParserHelper.java | 4 +- .../elasticsearch/indices/IndicesModule.java | 8 - .../org/elasticsearch/get/GetActionIT.java | 75 ---- .../index/mapper/MultiFieldTests.java | 8 - .../mapper/MultiFieldsIntegrationIT.java | 36 -- .../index/mapper/NumberFieldTypeTests.java | 23 +- .../index/query/RangeQueryBuilderTests.java | 17 +- .../index/query/TermsQueryBuilderTests.java | 3 +- .../search/query/QueryStringIT.java | 6 - .../search/query/SearchQueryIT.java | 15 - .../search/query/SimpleQueryStringIT.java | 2 - .../mapper/multifield/test-multi-fields.json | 6 - .../search/query/all-example-document.json | 1 - .../search/query/all-query-index.json | 4 +- modules/lang-painless/build.gradle | 4 + modules/mapper-extras/build.gradle | 23 ++ .../queries/BinaryDocValuesRangeQuery.java | 0 .../index/mapper/BinaryRangeUtil.java | 0 .../index/mapper/MapperExtrasPlugin.java | 42 +++ .../index/mapper/RangeFieldMapper.java | 13 +- .../index/mapper/ScaledFloatFieldMapper.java | 0 .../index/mapper/TokenCountFieldMapper.java | 0 
...ndomBinaryDocValuesRangeQueryTestCase.java | 12 +- .../BinaryDocValuesRangeQueryTests.java | 0 ...eRandomBinaryDocValuesRangeQueryTests.java | 6 +- ...tRandomBinaryDocValuesRangeQueryTests.java | 6 +- ...sRandomBinaryDocValuesRangeQueryTests.java | 6 +- ...rRandomBinaryDocValuesRangeQueryTests.java | 6 +- ...gRandomBinaryDocValuesRangeQueryTests.java | 6 +- .../index/mapper}/BWCTemplateTests.java | 19 +- .../index/mapper/BinaryRangeUtilTests.java | 0 .../MapperExtrasClientYamlTestSuiteIT.java | 40 +++ .../index/mapper/RangeFieldMapperTests.java | 9 + .../index/mapper/RangeFieldTypeTests.java | 2 +- .../mapper/ScaledFloatFieldMapperTests.java | 2 +- .../mapper/ScaledFloatFieldTypeTests.java | 0 .../TokenCountFieldMapperIntegrationIT.java | 3 +- .../mapper/TokenCountFieldMapperTests.java | 11 +- .../index/mapper}/filebeat-5.0.template.json | 0 .../mapper}/metricbeat-5.0.template.json | 0 .../mapper}/packetbeat-5.0.template.json | 0 .../mapper}/winlogbeat-5.0.template.json | 0 .../rest-api-spec/test/range/10_basic.yml | 334 ++++++++++++++++++ .../test/scaled_float/10_basic.yml | 105 ++++++ modules/percolator/build.gradle | 9 + .../test/search.aggregation/20_terms.yml | 53 --- settings.gradle | 2 + .../AbstractNumericFieldMapperTestCase.java | 0 .../test/AbstractQueryTestCase.java | 12 +- 53 files changed, 692 insertions(+), 332 deletions(-) create mode 100644 modules/mapper-extras/build.gradle rename {core => modules/mapper-extras}/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java (100%) rename {core => modules/mapper-extras}/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java (100%) create mode 100644 modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java rename {core => modules/mapper-extras}/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java (98%) rename {core => modules/mapper-extras}/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java (100%) rename {core 
=> modules/mapper-extras}/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java (100%) rename {core => modules/mapper-extras}/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java (92%) rename {core => modules/mapper-extras}/src/test/java/org/apache/lucene/queries/BinaryDocValuesRangeQueryTests.java (100%) rename {core => modules/mapper-extras}/src/test/java/org/apache/lucene/queries/DoubleRandomBinaryDocValuesRangeQueryTests.java (96%) rename {core => modules/mapper-extras}/src/test/java/org/apache/lucene/queries/FloatRandomBinaryDocValuesRangeQueryTests.java (96%) rename {core => modules/mapper-extras}/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java (96%) rename {core => modules/mapper-extras}/src/test/java/org/apache/lucene/queries/IntegerRandomBinaryDocValuesRangeQueryTests.java (96%) rename {core => modules/mapper-extras}/src/test/java/org/apache/lucene/queries/LongRandomBinaryDocValuesRangeQueryTests.java (96%) rename {core/src/test/java/org/elasticsearch/action/admin/indices/template => modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper}/BWCTemplateTests.java (82%) rename {core => modules/mapper-extras}/src/test/java/org/elasticsearch/index/mapper/BinaryRangeUtilTests.java (100%) create mode 100644 modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/MapperExtrasClientYamlTestSuiteIT.java rename {core => modules/mapper-extras}/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java (98%) rename {core => modules/mapper-extras}/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java (99%) rename {core => modules/mapper-extras}/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java (99%) rename {core => modules/mapper-extras}/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java (100%) rename {core => 
modules/mapper-extras}/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java (99%) rename {core => modules/mapper-extras}/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java (99%) rename {core/src/test/resources/org/elasticsearch/action/admin/indices/template => modules/mapper-extras/src/test/resources/org/elasticsearch/index/mapper}/filebeat-5.0.template.json (100%) rename {core/src/test/resources/org/elasticsearch/action/admin/indices/template => modules/mapper-extras/src/test/resources/org/elasticsearch/index/mapper}/metricbeat-5.0.template.json (100%) rename {core/src/test/resources/org/elasticsearch/action/admin/indices/template => modules/mapper-extras/src/test/resources/org/elasticsearch/index/mapper}/packetbeat-5.0.template.json (100%) rename {core/src/test/resources/org/elasticsearch/action/admin/indices/template => modules/mapper-extras/src/test/resources/org/elasticsearch/index/mapper}/winlogbeat-5.0.template.json (100%) create mode 100644 modules/mapper-extras/src/test/resources/rest-api-spec/test/range/10_basic.yml create mode 100644 modules/mapper-extras/src/test/resources/rest-api-spec/test/scaled_float/10_basic.yml rename {core/src/test => test/framework/src/main}/java/org/elasticsearch/index/mapper/AbstractNumericFieldMapperTestCase.java (100%) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index bd6d321bb1710..f3ac73690a95b 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -596,7 +596,6 @@ - diff --git a/core/src/main/java/org/elasticsearch/index/mapper/CustomDocValuesField.java b/core/src/main/java/org/elasticsearch/index/mapper/CustomDocValuesField.java index 60fbfc0698cda..f77d480e72298 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/CustomDocValuesField.java +++ 
b/core/src/main/java/org/elasticsearch/index/mapper/CustomDocValuesField.java @@ -28,8 +28,8 @@ import java.io.Reader; -// used for binary and geo fields -abstract class CustomDocValuesField implements IndexableField { +// used for binary, geo and range fields +public abstract class CustomDocValuesField implements IndexableField { public static final FieldType TYPE = new FieldType(); static { @@ -39,7 +39,7 @@ abstract class CustomDocValuesField implements IndexableField { private final String name; - CustomDocValuesField(String name) { + protected CustomDocValuesField(String name) { this.name = name; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 6f5190b637640..83ebd6d9fb56c 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -62,8 +62,7 @@ /** A {@link FieldMapper} for numeric types: byte, short, int, long, float and double. 
*/ public class NumberFieldMapper extends FieldMapper { - // this is private since it has a different default - static final Setting COERCE_SETTING = + public static final Setting COERCE_SETTING = Setting.boolSetting("index.mapping.coerce", true, Property.IndexScope); public static class Defaults { @@ -162,7 +161,7 @@ public Mapper.Builder parse(String name, Map node, public enum NumberType { HALF_FLOAT("half_float", NumericType.HALF_FLOAT) { @Override - Float parse(Object value, boolean coerce) { + public Float parse(Object value, boolean coerce) { final float result; if (value instanceof Number) { @@ -178,20 +177,20 @@ Float parse(Object value, boolean coerce) { } @Override - Float parse(XContentParser parser, boolean coerce) throws IOException { + public Float parse(XContentParser parser, boolean coerce) throws IOException { float parsed = parser.floatValue(coerce); validateParsed(parsed); return parsed; } @Override - Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value) { float v = parse(value, false); return HalfFloatPoint.newExactQuery(field, v); } @Override - Query termsQuery(String field, List values) { + public Query termsQuery(String field, List values) { float[] v = new float[values.size()]; for (int i = 0; i < values.size(); ++i) { v[i] = parse(values.get(i), false); @@ -200,7 +199,7 @@ Query termsQuery(String field, List values) { } @Override - Query rangeQuery(String field, Object lowerTerm, Object upperTerm, + public Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, boolean hasDocValues) { float l = Float.NEGATIVE_INFINITY; @@ -254,7 +253,7 @@ private void validateParsed(float value) { }, FLOAT("float", NumericType.FLOAT) { @Override - Float parse(Object value, boolean coerce) { + public Float parse(Object value, boolean coerce) { final float result; if (value instanceof Number) { @@ -270,20 +269,20 @@ Float parse(Object value, boolean coerce) { 
} @Override - Float parse(XContentParser parser, boolean coerce) throws IOException { + public Float parse(XContentParser parser, boolean coerce) throws IOException { float parsed = parser.floatValue(coerce); validateParsed(parsed); return parsed; } @Override - Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value) { float v = parse(value, false); return FloatPoint.newExactQuery(field, v); } @Override - Query termsQuery(String field, List values) { + public Query termsQuery(String field, List values) { float[] v = new float[values.size()]; for (int i = 0; i < values.size(); ++i) { v[i] = parse(values.get(i), false); @@ -292,7 +291,7 @@ Query termsQuery(String field, List values) { } @Override - Query rangeQuery(String field, Object lowerTerm, Object upperTerm, + public Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, boolean hasDocValues) { float l = Float.NEGATIVE_INFINITY; @@ -344,27 +343,27 @@ private void validateParsed(float value) { }, DOUBLE("double", NumericType.DOUBLE) { @Override - Double parse(Object value, boolean coerce) { + public Double parse(Object value, boolean coerce) { double parsed = objectToDouble(value); validateParsed(parsed); return parsed; } @Override - Double parse(XContentParser parser, boolean coerce) throws IOException { + public Double parse(XContentParser parser, boolean coerce) throws IOException { double parsed = parser.doubleValue(coerce); validateParsed(parsed); return parsed; } @Override - Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value) { double v = parse(value, false); return DoublePoint.newExactQuery(field, v); } @Override - Query termsQuery(String field, List values) { + public Query termsQuery(String field, List values) { double[] v = new double[values.size()]; for (int i = 0; i < values.size(); ++i) { v[i] = parse(values.get(i), false); @@ -373,7 +372,7 @@ Query 
termsQuery(String field, List values) { } @Override - Query rangeQuery(String field, Object lowerTerm, Object upperTerm, + public Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, boolean hasDocValues) { double l = Double.NEGATIVE_INFINITY; @@ -425,7 +424,7 @@ private void validateParsed(double value) { }, BYTE("byte", NumericType.BYTE) { @Override - Byte parse(Object value, boolean coerce) { + public Byte parse(Object value, boolean coerce) { double doubleValue = objectToDouble(value); if (doubleValue < Byte.MIN_VALUE || doubleValue > Byte.MAX_VALUE) { @@ -443,7 +442,7 @@ Byte parse(Object value, boolean coerce) { } @Override - Short parse(XContentParser parser, boolean coerce) throws IOException { + public Short parse(XContentParser parser, boolean coerce) throws IOException { int value = parser.intValue(coerce); if (value < Byte.MIN_VALUE || value > Byte.MAX_VALUE) { throw new IllegalArgumentException("Value [" + value + "] is out of range for a byte"); @@ -452,17 +451,17 @@ Short parse(XContentParser parser, boolean coerce) throws IOException { } @Override - Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value) { return INTEGER.termQuery(field, value); } @Override - Query termsQuery(String field, List values) { + public Query termsQuery(String field, List values) { return INTEGER.termsQuery(field, values); } @Override - Query rangeQuery(String field, Object lowerTerm, Object upperTerm, + public Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, boolean hasDocValues) { return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper, hasDocValues); @@ -481,7 +480,7 @@ Number valueForSearch(Number value) { }, SHORT("short", NumericType.SHORT) { @Override - Short parse(Object value, boolean coerce) { + public Short parse(Object value, boolean coerce) { double doubleValue = 
objectToDouble(value); if (doubleValue < Short.MIN_VALUE || doubleValue > Short.MAX_VALUE) { @@ -499,22 +498,22 @@ Short parse(Object value, boolean coerce) { } @Override - Short parse(XContentParser parser, boolean coerce) throws IOException { + public Short parse(XContentParser parser, boolean coerce) throws IOException { return parser.shortValue(coerce); } @Override - Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value) { return INTEGER.termQuery(field, value); } @Override - Query termsQuery(String field, List values) { + public Query termsQuery(String field, List values) { return INTEGER.termsQuery(field, values); } @Override - Query rangeQuery(String field, Object lowerTerm, Object upperTerm, + public Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, boolean hasDocValues) { return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper, hasDocValues); @@ -533,7 +532,7 @@ Number valueForSearch(Number value) { }, INTEGER("integer", NumericType.INT) { @Override - Integer parse(Object value, boolean coerce) { + public Integer parse(Object value, boolean coerce) { double doubleValue = objectToDouble(value); if (doubleValue < Integer.MIN_VALUE || doubleValue > Integer.MAX_VALUE) { @@ -551,12 +550,12 @@ Integer parse(Object value, boolean coerce) { } @Override - Integer parse(XContentParser parser, boolean coerce) throws IOException { + public Integer parse(XContentParser parser, boolean coerce) throws IOException { return parser.intValue(coerce); } @Override - Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value) { if (hasDecimalPart(value)) { return Queries.newMatchNoDocsQuery("Value [" + value + "] has a decimal part"); } @@ -565,7 +564,7 @@ Query termQuery(String field, Object value) { } @Override - Query termsQuery(String field, List values) { + public Query termsQuery(String field, List 
values) { int[] v = new int[values.size()]; int upTo = 0; @@ -586,7 +585,7 @@ Query termsQuery(String field, List values) { } @Override - Query rangeQuery(String field, Object lowerTerm, Object upperTerm, + public Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, boolean hasDocValues) { int l = Integer.MIN_VALUE; @@ -644,7 +643,7 @@ public List createFields(String name, Number value, }, LONG("long", NumericType.LONG) { @Override - Long parse(Object value, boolean coerce) { + public Long parse(Object value, boolean coerce) { if (value instanceof Long) { return (Long)value; } @@ -665,12 +664,12 @@ Long parse(Object value, boolean coerce) { } @Override - Long parse(XContentParser parser, boolean coerce) throws IOException { + public Long parse(XContentParser parser, boolean coerce) throws IOException { return parser.longValue(coerce); } @Override - Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value) { if (hasDecimalPart(value)) { return Queries.newMatchNoDocsQuery("Value [" + value + "] has a decimal part"); } @@ -679,7 +678,7 @@ Query termQuery(String field, Object value) { } @Override - Query termsQuery(String field, List values) { + public Query termsQuery(String field, List values) { long[] v = new long[values.size()]; int upTo = 0; @@ -700,7 +699,7 @@ Query termsQuery(String field, List values) { } @Override - Query rangeQuery(String field, Object lowerTerm, Object upperTerm, + public Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, boolean hasDocValues) { long l = Long.MIN_VALUE; @@ -773,13 +772,13 @@ public final String typeName() { final NumericType numericType() { return numericType; } - abstract Query termQuery(String field, Object value); - abstract Query termsQuery(String field, List values); - abstract Query rangeQuery(String field, Object lowerTerm, Object upperTerm, + public abstract 
Query termQuery(String field, Object value); + public abstract Query termsQuery(String field, List values); + public abstract Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, boolean hasDocValues); - abstract Number parse(XContentParser parser, boolean coerce) throws IOException; - abstract Number parse(Object value, boolean coerce); + public abstract Number parse(XContentParser parser, boolean coerce) throws IOException; + public abstract Number parse(Object value, boolean coerce); public abstract List createFields(String name, Number value, boolean indexed, boolean docValued, boolean stored); Number valueForSearch(Number value) { diff --git a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index bdf2b6cf530df..0d4ded10d2cbb 100644 --- a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -39,7 +39,6 @@ import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.RangeFieldMapper; import org.joda.time.DateTimeZone; import java.io.IOException; diff --git a/core/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java b/core/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java index e6846543b8cac..18a124d86b35c 100644 --- a/core/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java +++ b/core/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java @@ -28,7 +28,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.index.mapper.ScaledFloatFieldMapper; import 
org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.query.QueryShardContext; @@ -44,6 +43,7 @@ */ public final class QueryParserHelper { // Mapping types the "all-ish" query can be executed against + // TODO: Fix the API so that we don't need a hardcoded list of types private static final Set ALLOWED_QUERY_MAPPER_TYPES; static { @@ -54,7 +54,7 @@ public final class QueryParserHelper { for (NumberFieldMapper.NumberType nt : NumberFieldMapper.NumberType.values()) { ALLOWED_QUERY_MAPPER_TYPES.add(nt.typeName()); } - ALLOWED_QUERY_MAPPER_TYPES.add(ScaledFloatFieldMapper.CONTENT_TYPE); + ALLOWED_QUERY_MAPPER_TYPES.add("scaled_float"); ALLOWED_QUERY_MAPPER_TYPES.add(TextFieldMapper.CONTENT_TYPE); } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index e1d2b169c7fc3..2751c34a7f314 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -43,13 +43,10 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.ParentFieldMapper; -import org.elasticsearch.index.mapper.RangeFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; -import org.elasticsearch.index.mapper.ScaledFloatFieldMapper; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; -import org.elasticsearch.index.mapper.TokenCountFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; @@ -95,17 +92,12 @@ private Map getMappers(List mapperPlugi for (NumberFieldMapper.NumberType type : NumberFieldMapper.NumberType.values()) { mappers.put(type.typeName(), new 
NumberFieldMapper.TypeParser(type)); } - for (RangeFieldMapper.RangeType type : RangeFieldMapper.RangeType.values()) { - mappers.put(type.typeName(), new RangeFieldMapper.TypeParser(type)); - } mappers.put(BooleanFieldMapper.CONTENT_TYPE, new BooleanFieldMapper.TypeParser()); mappers.put(BinaryFieldMapper.CONTENT_TYPE, new BinaryFieldMapper.TypeParser()); mappers.put(DateFieldMapper.CONTENT_TYPE, new DateFieldMapper.TypeParser()); mappers.put(IpFieldMapper.CONTENT_TYPE, new IpFieldMapper.TypeParser()); - mappers.put(ScaledFloatFieldMapper.CONTENT_TYPE, new ScaledFloatFieldMapper.TypeParser()); mappers.put(TextFieldMapper.CONTENT_TYPE, new TextFieldMapper.TypeParser()); mappers.put(KeywordFieldMapper.CONTENT_TYPE, new KeywordFieldMapper.TypeParser()); - mappers.put(TokenCountFieldMapper.CONTENT_TYPE, new TokenCountFieldMapper.TypeParser()); mappers.put(ObjectMapper.CONTENT_TYPE, new ObjectMapper.TypeParser()); mappers.put(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser()); mappers.put(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser()); diff --git a/core/src/test/java/org/elasticsearch/get/GetActionIT.java b/core/src/test/java/org/elasticsearch/get/GetActionIT.java index f9c4b0d960638..1fd6084a231dc 100644 --- a/core/src/test/java/org/elasticsearch/get/GetActionIT.java +++ b/core/src/test/java/org/elasticsearch/get/GetActionIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.get; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.ShardOperationFailedException; @@ -913,68 +912,6 @@ void indexSingleDocumentWithStringFieldsGeneratedFromText(boolean stored, boolea index("test", "doc", "1", doc); } - public void testGeneratedNumberFieldsUnstored() throws IOException { - indexSingleDocumentWithNumericFieldsGeneratedFromText(false, randomBoolean()); - String[] fieldsList = {"token_count", "text.token_count"}; - // before 
refresh - document is only in translog - assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList); - refresh(); - //after refresh - document is in translog and also indexed - assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList); - flush(); - //after flush - document is in not anymore translog - only indexed - assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList); - } - - public void testGeneratedNumberFieldsStored() throws IOException { - indexSingleDocumentWithNumericFieldsGeneratedFromText(true, randomBoolean()); - String[] fieldsList = {"token_count", "text.token_count"}; - assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList); - flush(); - //after flush - document is in not anymore translog - only indexed - assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList); - } - - void indexSingleDocumentWithNumericFieldsGeneratedFromText(boolean stored, boolean sourceEnabled) { - String storedString = stored ? "true" : "false"; - String createIndexSource = "{\n" + - " \"settings\": {\n" + - " \"index.translog.flush_threshold_size\": \"1pb\",\n" + - " \"refresh_interval\": \"-1\"\n" + - " },\n" + - " \"mappings\": {\n" + - " \"doc\": {\n" + - " \"_source\" : {\"enabled\" : " + sourceEnabled + "}," + - " \"properties\": {\n" + - " \"token_count\": {\n" + - " \"type\": \"token_count\",\n" + - " \"analyzer\": \"standard\",\n" + - " \"store\": \"" + storedString + "\"" + - " },\n" + - " \"text\": {\n" + - " \"type\": \"text\",\n" + - " \"fields\": {\n" + - " \"token_count\": {\n" + - " \"type\": \"token_count\",\n" + - " \"analyzer\": \"standard\",\n" + - " \"store\": \"" + storedString + "\"" + - " }\n" + - " }\n" + - " }" + - " }\n" + - " }\n" + - " }\n" + - "}"; - - assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, XContentType.JSON)); - ensureGreen(); - String doc = "{\n" + - " \"token_count\": \"A text with five words.\",\n" + - " \"text\": \"A text with five words.\"\n" 
+ - "}\n"; - index("test", "doc", "1", doc); - } - private void assertGetFieldsAlwaysWorks(String index, String type, String docId, String[] fields) { assertGetFieldsAlwaysWorks(index, type, docId, fields, null); } @@ -997,18 +934,6 @@ private void assertGetFieldWorks(String index, String type, String docId, String assertNotNull(response.getField(field)); } - private void assertGetFieldException(String index, String type, String docId, String field) { - try { - client().prepareGet().setIndex(index).setType(type).setId(docId).setStoredFields(field); - fail(); - } catch (ElasticsearchException e) { - assertTrue(e.getMessage().contains("You can only get this field after refresh() has been called.")); - } - MultiGetResponse multiGetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item(index, type, docId).storedFields(field)).get(); - assertNull(multiGetResponse.getResponses()[0].getResponse()); - assertTrue(multiGetResponse.getResponses()[0].getFailure().getMessage().contains("You can only get this field after refresh() has been called.")); - } - protected void assertGetFieldsNull(String index, String type, String docId, String[] fields) { assertGetFieldsNull(index, type, docId, fields, null); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java index eb1148e9f4598..26fc15bf6621b 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java @@ -106,14 +106,6 @@ private void testMultiField(String mapping) throws Exception { assertThat(docMapper.mappers().getMapper("name.test1").fieldType().tokenized(), equalTo(true)); assertThat(docMapper.mappers().getMapper("name.test1").fieldType().eagerGlobalOrdinals(), equalTo(true)); - assertThat(docMapper.mappers().getMapper("name.test2"), notNullValue()); - assertThat(docMapper.mappers().getMapper("name.test2"), 
instanceOf(TokenCountFieldMapper.class)); - assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name.test2").fieldType().indexOptions()); - assertThat(docMapper.mappers().getMapper("name.test2").fieldType().stored(), equalTo(true)); - assertThat(docMapper.mappers().getMapper("name.test2").fieldType().tokenized(), equalTo(false)); - assertThat(((TokenCountFieldMapper) docMapper.mappers().getMapper("name.test2")).analyzer(), equalTo("simple")); - assertThat(((TokenCountFieldMapper) docMapper.mappers().getMapper("name.test2")).analyzer(), equalTo("simple")); - assertThat(docMapper.mappers().getMapper("object1.multi1"), notNullValue()); assertThat(docMapper.mappers().getMapper("object1.multi1"), instanceOf(DateFieldMapper.class)); assertThat(docMapper.mappers().getMapper("object1.multi1.string"), notNullValue()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java index ae922e6a731f8..8dbddcc5daa54 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java @@ -130,42 +130,6 @@ public void testGeoPointMultiField() throws Exception { assertThat(countResponse.getHits().getTotalHits(), equalTo(1L)); } - public void testTokenCountMultiField() throws Exception { - assertAcked( - client().admin().indices().prepareCreate("my-index") - .addMapping("my-type", XContentFactory.jsonBuilder().startObject().startObject("my-type") - .startObject("properties") - .startObject("a") - .field("type", "token_count") - .field("analyzer", "simple") - .startObject("fields") - .startObject("b") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject().endObject()) - ); - - GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get(); - MappingMetaData 
mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type"); - assertThat(mappingMetaData, not(nullValue())); - Map mappingSource = mappingMetaData.sourceAsMap(); - Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource)); - assertThat(aField.size(), equalTo(3)); - assertThat(aField.get("type").toString(), equalTo("token_count")); - assertThat(aField.get("fields"), notNullValue()); - - Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource)); - assertThat(bField.size(), equalTo(1)); - assertThat(bField.get("type").toString(), equalTo("keyword")); - - client().prepareIndex("my-index", "my-type", "1").setSource("a", "my tokens").setRefreshPolicy(IMMEDIATE).get(); - SearchResponse countResponse = client().prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", "my tokens")).get(); - assertThat(countResponse.getHits().getTotalHits(), equalTo(1L)); - } - public void testCompletionMultiField() throws Exception { assertAcked( client().admin().indices().prepareCreate("my-index") diff --git a/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java index 0066c5a7798b6..6d5ca1add74d5 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java @@ -49,9 +49,7 @@ import java.math.BigInteger; import java.nio.charset.StandardCharsets; import java.util.Arrays; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.function.Supplier; import static org.hamcrest.Matchers.containsString; @@ -500,4 +498,25 @@ static OutOfRangeSpec of(NumberType t, V v, String m) { message = m; } } + + public void testDisplayValue() { + for (NumberFieldMapper.NumberType type : NumberFieldMapper.NumberType.values()) { + NumberFieldMapper.NumberFieldType fieldType = new 
NumberFieldMapper.NumberFieldType(type); + assertNull(fieldType.valueForDisplay(null)); + } + assertEquals(Byte.valueOf((byte) 3), + new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.BYTE).valueForDisplay(3)); + assertEquals(Short.valueOf((short) 3), + new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.SHORT).valueForDisplay(3)); + assertEquals(Integer.valueOf(3), + new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER).valueForDisplay(3)); + assertEquals(Long.valueOf(3), + new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG).valueForDisplay(3L)); + assertEquals(Double.valueOf(1.2), + new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.HALF_FLOAT).valueForDisplay(1.2)); + assertEquals(Double.valueOf(1.2), + new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.FLOAT).valueForDisplay(1.2)); + assertEquals(Double.valueOf(1.2), + new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE).valueForDisplay(1.2)); + } } diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index b101a68185590..a3f90498b52a2 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.query; -import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.Term; @@ -33,7 +31,6 @@ import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.lucene.BytesRefs; import 
org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; @@ -64,13 +61,13 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() { switch (randomIntBetween(0, 2)) { case 0: // use mapped integer field for numeric range queries - query = new RangeQueryBuilder(randomBoolean() ? INT_FIELD_NAME : INT_RANGE_FIELD_NAME); + query = new RangeQueryBuilder(INT_FIELD_NAME); query.from(randomIntBetween(1, 100)); query.to(randomIntBetween(101, 200)); break; case 1: // use mapped date field, using date string representation - query = new RangeQueryBuilder(randomBoolean() ? DATE_FIELD_NAME : DATE_RANGE_FIELD_NAME); + query = new RangeQueryBuilder(DATE_FIELD_NAME); query.from(new DateTime(System.currentTimeMillis() - randomIntBetween(0, 1000000), DateTimeZone.UTC).toString()); query.to(new DateTime(System.currentTimeMillis() + randomIntBetween(0, 1000000), DateTimeZone.UTC).toString()); // Create timestamp option only then we have a date mapper, @@ -98,9 +95,6 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() { if (randomBoolean()) { query.to(null); } - if (query.fieldName().equals(INT_RANGE_FIELD_NAME) || query.fieldName().equals(DATE_RANGE_FIELD_NAME)) { - query.relation(RandomPicks.randomFrom(random(), ShapeRelation.values()).getRelationName()); - } return query; } @@ -137,9 +131,7 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, } else if (getCurrentTypes().length == 0 || (queryBuilder.fieldName().equals(DATE_FIELD_NAME) == false - && queryBuilder.fieldName().equals(INT_FIELD_NAME) == false - && queryBuilder.fieldName().equals(DATE_RANGE_FIELD_NAME) == false - && queryBuilder.fieldName().equals(INT_RANGE_FIELD_NAME) == false)) { + && queryBuilder.fieldName().equals(INT_FIELD_NAME) == false)) { assertThat(query, instanceOf(TermRangeQuery.class)); TermRangeQuery termRangeQuery = (TermRangeQuery) query; assertThat(termRangeQuery.getField(), equalTo(queryBuilder.fieldName())); @@ 
-215,9 +207,6 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, maxInt--; } } - } else if (queryBuilder.fieldName().equals(DATE_RANGE_FIELD_NAME) - || queryBuilder.fieldName().equals(INT_RANGE_FIELD_NAME)) { - // todo can't check RangeFieldQuery because its currently package private (this will change) } else { throw new UnsupportedOperationException(); } diff --git a/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java index 6abff5fbcdec6..79f9af61408b2 100644 --- a/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java @@ -79,8 +79,7 @@ protected TermsQueryBuilder doCreateTestQueryBuilder() { String fieldName; do { fieldName = getRandomFieldName(); - } while (fieldName.equals(GEO_POINT_FIELD_NAME) || fieldName.equals(GEO_SHAPE_FIELD_NAME) - || fieldName.equals(INT_RANGE_FIELD_NAME) || fieldName.equals(DATE_RANGE_FIELD_NAME)); + } while (fieldName.equals(GEO_POINT_FIELD_NAME) || fieldName.equals(GEO_SHAPE_FIELD_NAME)); Object[] values = new Object[randomInt(5)]; for (int i = 0; i < values.length; i++) { values[i] = getRandomValueForFieldName(fieldName); diff --git a/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java b/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java index a44d6ff6d5e84..ac72fc6fcb96d 100644 --- a/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java @@ -71,10 +71,6 @@ public void setup() throws Exception { ensureGreen("test"); } - private QueryStringQueryBuilder lenientQuery(String queryText) { - return queryStringQuery(queryText).lenient(true); - } - public void testBasicAllQuery() throws Exception { List reqs = new ArrayList<>(); reqs.add(client().prepareIndex("test", "doc", 
"1").setSource("f1", "foo bar baz")); @@ -177,8 +173,6 @@ public void testDocWithAllTypes() throws Exception { assertHits(resp.getHits(), "1"); resp = client().prepareSearch("test").setQuery(queryStringQuery("1.5")).get(); assertHits(resp.getHits(), "1"); - resp = client().prepareSearch("test").setQuery(queryStringQuery("12.23")).get(); - assertHits(resp.getHits(), "1"); resp = client().prepareSearch("test").setQuery(queryStringQuery("127.0.0.1")).get(); assertHits(resp.getHits(), "1"); // binary doesn't match diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 3c4a0f8823024..fb2fbd337dd2b 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -1890,19 +1890,4 @@ public void testQueryStringParserCache() throws Exception { } } - public void testRangeQueryRangeFields_24744() throws Exception { - assertAcked(prepareCreate("test") - .addMapping("type1", "int_range", "type=integer_range")); - - client().prepareIndex("test", "type1", "1") - .setSource(jsonBuilder() - .startObject() - .startObject("int_range").field("gte", 10).field("lte", 20).endObject() - .endObject()).get(); - refresh(); - - RangeQueryBuilder range = new RangeQueryBuilder("int_range").relation("intersects").from(Integer.MIN_VALUE).to(Integer.MAX_VALUE); - SearchResponse searchResponse = client().prepareSearch("test").setQuery(range).get(); - assertHitCount(searchResponse, 1); - } } diff --git a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index 398b30abbe14e..bd4bf0624feb1 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -498,8 +498,6 @@ public void 
testDocWithAllTypes() throws Exception { assertHits(resp.getHits(), "1"); resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("1.5")).get(); assertHits(resp.getHits(), "1"); - resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("12.23")).get(); - assertHits(resp.getHits(), "1"); resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1")).get(); assertHits(resp.getHits(), "1"); // binary doesn't match diff --git a/core/src/test/resources/org/elasticsearch/index/mapper/multifield/test-multi-fields.json b/core/src/test/resources/org/elasticsearch/index/mapper/multifield/test-multi-fields.json index 7d4f819a2508f..b7317aba3c148 100644 --- a/core/src/test/resources/org/elasticsearch/index/mapper/multifield/test-multi-fields.json +++ b/core/src/test/resources/org/elasticsearch/index/mapper/multifield/test-multi-fields.json @@ -18,12 +18,6 @@ "type": "text", "store": true, "eager_global_ordinals": true - }, - "test2": { - "type": "token_count", - "index": true, - "store": true, - "analyzer": "simple" } } }, diff --git a/core/src/test/resources/org/elasticsearch/search/query/all-example-document.json b/core/src/test/resources/org/elasticsearch/search/query/all-example-document.json index 9e4d04930a71a..abc22939b6422 100644 --- a/core/src/test/resources/org/elasticsearch/search/query/all-example-document.json +++ b/core/src/test/resources/org/elasticsearch/search/query/all-example-document.json @@ -21,7 +21,6 @@ "f_long": "42", "f_float": "1.7", "f_hfloat": "1.5", - "f_sfloat": "12.23", "f_ip": "127.0.0.1", "f_binary": "VGhpcyBpcyBzb21lIGJpbmFyeSBkYXRhCg==", "f_suggest": { diff --git a/core/src/test/resources/org/elasticsearch/search/query/all-query-index.json b/core/src/test/resources/org/elasticsearch/search/query/all-query-index.json index 89c412171254a..3b068132d5142 100644 --- a/core/src/test/resources/org/elasticsearch/search/query/all-query-index.json +++ 
b/core/src/test/resources/org/elasticsearch/search/query/all-query-index.json @@ -18,8 +18,7 @@ "f_multi": { "type": "text", "fields": { - "raw": {"type": "keyword"}, - "f_token_count": {"type": "token_count", "analyzer": "standard"} + "raw": {"type": "keyword"} } }, "f_object": { @@ -49,7 +48,6 @@ "f_long": {"type": "long"}, "f_float": {"type": "float"}, "f_hfloat": {"type": "half_float"}, - "f_sfloat": {"type": "scaled_float", "scaling_factor": 100}, "f_ip": {"type": "ip"}, "f_binary": {"type": "binary"}, "f_suggest": {"type": "completion"}, diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 87e3d265f4292..dddc98ae138ad 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -24,6 +24,10 @@ esplugin { classname 'org.elasticsearch.painless.PainlessPlugin' } +integTestCluster { + module project.project(':modules:mapper-extras') +} + dependencies { compile 'org.antlr:antlr4-runtime:4.5.1-1' compile 'org.ow2.asm:asm-debug-all:5.1' diff --git a/modules/mapper-extras/build.gradle b/modules/mapper-extras/build.gradle new file mode 100644 index 0000000000000..7831de3a68e94 --- /dev/null +++ b/modules/mapper-extras/build.gradle @@ -0,0 +1,23 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +esplugin { + description 'Adds advanced field mappers' + classname 'org.elasticsearch.index.mapper.MapperExtrasPlugin' +} diff --git a/core/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java b/modules/mapper-extras/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java similarity index 100% rename from core/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java rename to modules/mapper-extras/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java diff --git a/core/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java similarity index 100% rename from core/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java rename to modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java new file mode 100644 index 0000000000000..d91d2b28df821 --- /dev/null +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.plugins.Plugin; + +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; + +public class MapperExtrasPlugin extends Plugin implements MapperPlugin { + + @Override + public Map getMappers() { + Map mappers = new LinkedHashMap<>(); + mappers.put(ScaledFloatFieldMapper.CONTENT_TYPE, new ScaledFloatFieldMapper.TypeParser()); + mappers.put(TokenCountFieldMapper.CONTENT_TYPE, new TokenCountFieldMapper.TypeParser()); + for (RangeFieldMapper.RangeType type : RangeFieldMapper.RangeType.values()) { + mappers.put(type.typeName(), new RangeFieldMapper.TypeParser(type)); + } + return Collections.unmodifiableMap(mappers); + } + +} diff --git a/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java similarity index 98% rename from core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java rename to modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index c243858cd9155..d6c453bd6e6e1 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -289,21 +289,17 @@ protected DateMathParser dateMathParser() { @Override public Query termQuery(Object value, QueryShardContext context) { - Query query = rangeQuery(value, value, true, true, 
ShapeRelation.INTERSECTS, context); + Query query = rangeQuery(value, value, true, true, ShapeRelation.INTERSECTS, null, null, context); if (boost() != 1f) { query = new BoostQuery(query, boost()); } return query; } - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, - ShapeRelation relation, QueryShardContext context) { - failIfNotIndexed(); - return rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, relation, null, dateMathParser, context); - } - + @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, ShapeRelation relation, DateTimeZone timeZone, DateMathParser parser, QueryShardContext context) { + failIfNotIndexed(); return rangeType.rangeQuery(name(), hasDocValues(), lowerTerm, upperTerm, includeLower, includeUpper, relation, timeZone, parser, context); } @@ -525,9 +521,6 @@ public Query intersectsQuery(String field, Object from, Object to, boolean inclu return InetAddressRange.newIntersectsQuery(field, includeLower ? lower : nextUp(lower), includeUpper ? 
upper : nextDown(upper)); } - public String toString(InetAddress address) { - return InetAddresses.toAddrString(address); - } }, DATE("date_range", NumberType.LONG) { @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java similarity index 100% rename from core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java rename to modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java similarity index 100% rename from core/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java rename to modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java diff --git a/core/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java b/modules/mapper-extras/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java similarity index 92% rename from core/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java rename to modules/mapper-extras/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java index b83dac78d070e..fcc9f67229f87 100644 --- a/core/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java +++ b/modules/mapper-extras/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java @@ -49,7 +49,7 @@ public void testRandomBig() throws Exception { @Override protected final Field newRangeField(Range box) { - AbstractRange testRange = (AbstractRange) box; + AbstractRange testRange = (AbstractRange) box; RangeFieldMapper.Range range = new 
RangeFieldMapper.Range(rangeType(), testRange.getMin(), testRange.getMax(), true , true); try { BytesRef encodeRange = rangeType().encodeRanges(Collections.singleton(range)); @@ -61,25 +61,25 @@ protected final Field newRangeField(Range box) { @Override protected final Query newIntersectsQuery(Range box) { - AbstractRange testRange = (AbstractRange) box; + AbstractRange testRange = (AbstractRange) box; return rangeType().dvRangeQuery(fieldName(), INTERSECTS, testRange.getMin(), testRange.getMax(), true, true); } @Override protected final Query newContainsQuery(Range box) { - AbstractRange testRange = (AbstractRange) box; + AbstractRange testRange = (AbstractRange) box; return rangeType().dvRangeQuery(fieldName(), CONTAINS, testRange.getMin(), testRange.getMax(), true, true); } @Override protected final Query newWithinQuery(Range box) { - AbstractRange testRange = (AbstractRange) box; + AbstractRange testRange = (AbstractRange) box; return rangeType().dvRangeQuery(fieldName(), WITHIN, testRange.getMin(), testRange.getMax(), true, true); } @Override protected final Query newCrossesQuery(Range box) { - AbstractRange testRange = (AbstractRange) box; + AbstractRange testRange = (AbstractRange) box; return rangeType().dvRangeQuery(fieldName(), CROSSES, testRange.getMin(), testRange.getMax(), true, true); } @@ -116,7 +116,7 @@ protected final Object getMax(int dim) { @Override protected final boolean isEqual(Range o) { - AbstractRange other = (AbstractRange) o; + AbstractRange other = (AbstractRange) o; return Objects.equals(getMin(), other.getMin()) && Objects.equals(getMax(), other.getMax()); } diff --git a/core/src/test/java/org/apache/lucene/queries/BinaryDocValuesRangeQueryTests.java b/modules/mapper-extras/src/test/java/org/apache/lucene/queries/BinaryDocValuesRangeQueryTests.java similarity index 100% rename from core/src/test/java/org/apache/lucene/queries/BinaryDocValuesRangeQueryTests.java rename to 
modules/mapper-extras/src/test/java/org/apache/lucene/queries/BinaryDocValuesRangeQueryTests.java diff --git a/core/src/test/java/org/apache/lucene/queries/DoubleRandomBinaryDocValuesRangeQueryTests.java b/modules/mapper-extras/src/test/java/org/apache/lucene/queries/DoubleRandomBinaryDocValuesRangeQueryTests.java similarity index 96% rename from core/src/test/java/org/apache/lucene/queries/DoubleRandomBinaryDocValuesRangeQueryTests.java rename to modules/mapper-extras/src/test/java/org/apache/lucene/queries/DoubleRandomBinaryDocValuesRangeQueryTests.java index aa15a80319510..984b1d72ef843 100644 --- a/core/src/test/java/org/apache/lucene/queries/DoubleRandomBinaryDocValuesRangeQueryTests.java +++ b/modules/mapper-extras/src/test/java/org/apache/lucene/queries/DoubleRandomBinaryDocValuesRangeQueryTests.java @@ -56,7 +56,7 @@ private double nextDoubleInternal() { } } - private static class DoubleTestRange extends AbstractRange { + private static class DoubleTestRange extends AbstractRange { double min; double max; @@ -66,7 +66,7 @@ private static class DoubleTestRange extends AbstractRange { } @Override - public Object getMin() { + public Double getMin() { return min; } @@ -82,7 +82,7 @@ protected void setMin(int dim, Object val) { } @Override - public Object getMax() { + public Double getMax() { return max; } diff --git a/core/src/test/java/org/apache/lucene/queries/FloatRandomBinaryDocValuesRangeQueryTests.java b/modules/mapper-extras/src/test/java/org/apache/lucene/queries/FloatRandomBinaryDocValuesRangeQueryTests.java similarity index 96% rename from core/src/test/java/org/apache/lucene/queries/FloatRandomBinaryDocValuesRangeQueryTests.java rename to modules/mapper-extras/src/test/java/org/apache/lucene/queries/FloatRandomBinaryDocValuesRangeQueryTests.java index 8a04a50448fed..a7f877392cf43 100644 --- a/core/src/test/java/org/apache/lucene/queries/FloatRandomBinaryDocValuesRangeQueryTests.java +++ 
b/modules/mapper-extras/src/test/java/org/apache/lucene/queries/FloatRandomBinaryDocValuesRangeQueryTests.java @@ -56,7 +56,7 @@ private float nextFloatInternal() { } } - private static class FloatTestRange extends AbstractRange { + private static class FloatTestRange extends AbstractRange { float min; float max; @@ -66,7 +66,7 @@ private static class FloatTestRange extends AbstractRange { } @Override - public Object getMin() { + public Float getMin() { return min; } @@ -82,7 +82,7 @@ protected void setMin(int dim, Object val) { } @Override - public Object getMax() { + public Float getMax() { return max; } diff --git a/core/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java b/modules/mapper-extras/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java similarity index 96% rename from core/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java rename to modules/mapper-extras/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java index 1592e89d174eb..2def2702d38b3 100644 --- a/core/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java +++ b/modules/mapper-extras/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java @@ -67,7 +67,7 @@ private InetAddress nextInetaddress() throws UnknownHostException { } } - private static class IpRange extends AbstractRange { + private static class IpRange extends AbstractRange { InetAddress minAddress; InetAddress maxAddress; byte[] min; @@ -81,7 +81,7 @@ private static class IpRange extends AbstractRange { } @Override - public Object getMin() { + public InetAddress getMin() { return minAddress; } @@ -101,7 +101,7 @@ protected void setMin(int dim, Object val) { } @Override - public Object getMax() { + public InetAddress getMax() { return maxAddress; } diff --git 
a/core/src/test/java/org/apache/lucene/queries/IntegerRandomBinaryDocValuesRangeQueryTests.java b/modules/mapper-extras/src/test/java/org/apache/lucene/queries/IntegerRandomBinaryDocValuesRangeQueryTests.java similarity index 96% rename from core/src/test/java/org/apache/lucene/queries/IntegerRandomBinaryDocValuesRangeQueryTests.java rename to modules/mapper-extras/src/test/java/org/apache/lucene/queries/IntegerRandomBinaryDocValuesRangeQueryTests.java index 6fe59b8827312..1d04cdbaaca86 100644 --- a/core/src/test/java/org/apache/lucene/queries/IntegerRandomBinaryDocValuesRangeQueryTests.java +++ b/modules/mapper-extras/src/test/java/org/apache/lucene/queries/IntegerRandomBinaryDocValuesRangeQueryTests.java @@ -64,7 +64,7 @@ private int nextIntInternal() { } } - private static class IntTestRange extends AbstractRange { + private static class IntTestRange extends AbstractRange { int min; int max; @@ -74,7 +74,7 @@ private static class IntTestRange extends AbstractRange { } @Override - public Object getMin() { + public Integer getMin() { return min; } @@ -90,7 +90,7 @@ protected void setMin(int dim, Object val) { } @Override - public Object getMax() { + public Integer getMax() { return max; } diff --git a/core/src/test/java/org/apache/lucene/queries/LongRandomBinaryDocValuesRangeQueryTests.java b/modules/mapper-extras/src/test/java/org/apache/lucene/queries/LongRandomBinaryDocValuesRangeQueryTests.java similarity index 96% rename from core/src/test/java/org/apache/lucene/queries/LongRandomBinaryDocValuesRangeQueryTests.java rename to modules/mapper-extras/src/test/java/org/apache/lucene/queries/LongRandomBinaryDocValuesRangeQueryTests.java index 139cb3f0b1282..e506c2c269028 100644 --- a/core/src/test/java/org/apache/lucene/queries/LongRandomBinaryDocValuesRangeQueryTests.java +++ b/modules/mapper-extras/src/test/java/org/apache/lucene/queries/LongRandomBinaryDocValuesRangeQueryTests.java @@ -64,7 +64,7 @@ private long nextLongInternal() { } } - private static class 
LongTestRange extends AbstractRange { + private static class LongTestRange extends AbstractRange { long min; long max; @@ -74,7 +74,7 @@ private static class LongTestRange extends AbstractRange { } @Override - public Object getMin() { + public Long getMin() { return min; } @@ -90,7 +90,7 @@ protected void setMin(int dim, Object val) { } @Override - public Object getMax() { + public Long getMax() { return max; } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/template/BWCTemplateTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/BWCTemplateTests.java similarity index 82% rename from core/src/test/java/org/elasticsearch/action/admin/indices/template/BWCTemplateTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/BWCTemplateTests.java index 40c314edd41cd..1d9671218c456 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/template/BWCTemplateTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/BWCTemplateTests.java @@ -17,11 +17,15 @@ * under the License. 
*/ -package org.elasticsearch.action.admin.indices.template; +package org.elasticsearch.index.mapper; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.mapper.MapperExtrasPlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import java.util.Collection; + import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath; /** @@ -29,11 +33,16 @@ * prior to their 5.x releases work for newly created indices */ public class BWCTemplateTests extends ESSingleNodeTestCase { + @Override + protected Collection> getPlugins() { + return pluginList(MapperExtrasPlugin.class); + } + public void testBeatsTemplatesBWC() throws Exception { - byte[] metricBeat = copyToBytesFromClasspath("/org/elasticsearch/action/admin/indices/template/metricbeat-5.0.template.json"); - byte[] packetBeat = copyToBytesFromClasspath("/org/elasticsearch/action/admin/indices/template/packetbeat-5.0.template.json"); - byte[] fileBeat = copyToBytesFromClasspath("/org/elasticsearch/action/admin/indices/template/filebeat-5.0.template.json"); - byte[] winLogBeat = copyToBytesFromClasspath("/org/elasticsearch/action/admin/indices/template/winlogbeat-5.0.template.json"); + byte[] metricBeat = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/metricbeat-5.0.template.json"); + byte[] packetBeat = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/packetbeat-5.0.template.json"); + byte[] fileBeat = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/filebeat-5.0.template.json"); + byte[] winLogBeat = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/winlogbeat-5.0.template.json"); client().admin().indices().preparePutTemplate("metricbeat").setSource(metricBeat, XContentType.JSON).get(); client().admin().indices().preparePutTemplate("packetbeat").setSource(packetBeat, XContentType.JSON).get(); client().admin().indices().preparePutTemplate("filebeat").setSource(fileBeat, 
XContentType.JSON).get(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/BinaryRangeUtilTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/BinaryRangeUtilTests.java similarity index 100% rename from core/src/test/java/org/elasticsearch/index/mapper/BinaryRangeUtilTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/BinaryRangeUtilTests.java diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/MapperExtrasClientYamlTestSuiteIT.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/MapperExtrasClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..e2f10791739f8 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/MapperExtrasClientYamlTestSuiteIT.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +/** Runs yaml rest tests */ +public class MapperExtrasClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public MapperExtrasClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } +} + diff --git a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java similarity index 98% rename from core/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java index 7bae878b92459..0742aeadcb58a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java @@ -27,10 +27,13 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; import java.net.InetAddress; import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.Locale; @@ -42,6 +45,12 @@ import static org.hamcrest.Matchers.containsString; public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase { + + @Override + protected Collection> getPlugins() { + return 
pluginList(InternalSettingsPlugin.class, MapperExtrasPlugin.class); + } + private static String FROM_DATE = "2016-10-31"; private static String TO_DATE = "2016-11-01 20:00:00"; private static String FROM_IP = "::ffff:c0a8:107"; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java similarity index 99% rename from core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java index 328e61c233091..810563555969a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java @@ -95,7 +95,7 @@ public void testRangeQuery() throws Exception { Object to = nextTo(from); assertEquals(getExpectedRangeQuery(relation, from, to, includeLower, includeUpper), - ft.rangeQuery(from, to, includeLower, includeUpper, relation, context)); + ft.rangeQuery(from, to, includeLower, includeUpper, relation, null, null, context)); } private Query getExpectedRangeQuery(ShapeRelation relation, Object from, Object to, boolean includeLower, boolean includeUpper) { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java similarity index 99% rename from core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java index c0650c9c72e5b..42f8bff544912 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java @@ -50,7 +50,7 @@ public 
void setup() { @Override protected Collection> getPlugins() { - return pluginList(InternalSettingsPlugin.class); + return pluginList(InternalSettingsPlugin.class, MapperExtrasPlugin.class); } public void testDefaults() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java similarity index 100% rename from core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java similarity index 99% rename from core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java index 75b588df85ad3..3d69b0d013e29 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java @@ -220,7 +220,8 @@ private void assertSearchHit(SearchHit hit, int[] standardTermCounts, int[] engl assertThat(hit.field("foo.token_count_without_position_increments"), not(nullValue())); assertThat(hit.field("foo.token_count_without_position_increments").getValues().size(), equalTo(englishTermCounts.length)); for (int i = 0; i < englishTermCounts.length; i++) { - assertThat((Integer) hit.field("foo.token_count_without_position_increments").getValues().get(i), equalTo(englishTermCounts[i])); + assertThat((Integer) hit.field("foo.token_count_without_position_increments").getValues().get(i), + equalTo(englishTermCounts[i])); } if 
(loadCountedFields && storeCountedFields) { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java similarity index 99% rename from core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java index 861586370aef8..633f10276096c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java @@ -45,6 +45,12 @@ * Test for {@link TokenCountFieldMapper}. */ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(InternalSettingsPlugin.class, MapperExtrasPlugin.class); + } + public void testMerge() throws IOException { String stage1Mapping = XContentFactory.jsonBuilder().startObject() .startObject("person") @@ -122,11 +128,6 @@ public TokenStreamComponents createComponents(String fieldName) { return analyzer; } - @Override - protected Collection> getPlugins() { - return pluginList(InternalSettingsPlugin.class); - } - public void testEmptyName() throws IOException { IndexService indexService = createIndex("test"); DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); diff --git a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/filebeat-5.0.template.json b/modules/mapper-extras/src/test/resources/org/elasticsearch/index/mapper/filebeat-5.0.template.json similarity index 100% rename from core/src/test/resources/org/elasticsearch/action/admin/indices/template/filebeat-5.0.template.json rename to modules/mapper-extras/src/test/resources/org/elasticsearch/index/mapper/filebeat-5.0.template.json diff --git 
a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/metricbeat-5.0.template.json b/modules/mapper-extras/src/test/resources/org/elasticsearch/index/mapper/metricbeat-5.0.template.json similarity index 100% rename from core/src/test/resources/org/elasticsearch/action/admin/indices/template/metricbeat-5.0.template.json rename to modules/mapper-extras/src/test/resources/org/elasticsearch/index/mapper/metricbeat-5.0.template.json diff --git a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/packetbeat-5.0.template.json b/modules/mapper-extras/src/test/resources/org/elasticsearch/index/mapper/packetbeat-5.0.template.json similarity index 100% rename from core/src/test/resources/org/elasticsearch/action/admin/indices/template/packetbeat-5.0.template.json rename to modules/mapper-extras/src/test/resources/org/elasticsearch/index/mapper/packetbeat-5.0.template.json diff --git a/core/src/test/resources/org/elasticsearch/action/admin/indices/template/winlogbeat-5.0.template.json b/modules/mapper-extras/src/test/resources/org/elasticsearch/index/mapper/winlogbeat-5.0.template.json similarity index 100% rename from core/src/test/resources/org/elasticsearch/action/admin/indices/template/winlogbeat-5.0.template.json rename to modules/mapper-extras/src/test/resources/org/elasticsearch/index/mapper/winlogbeat-5.0.template.json diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/range/10_basic.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/range/10_basic.yml new file mode 100644 index 0000000000000..9fd54d6342d54 --- /dev/null +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/range/10_basic.yml @@ -0,0 +1,334 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + doc: + "properties": + "integer_range": + "type" : "integer_range" + "long_range": + "type" : "long_range" + "float_range": + "type" : "float_range" + "double_range": + 
"type" : "double_range" + "date_range": + "type" : "date_range" + "ip_range": + "type" : "ip_range" + +--- +"Integer range": + + - do: + index: + index: test + type: doc + id: 1 + body: { "integer_range" : { "gte": 1, "lte": 5 } } + + - do: + index: + index: test + type: doc + id: 2 + body: { "integer_range" : { "gte": 1, "lte": 3 } } + + - do: + index: + index: test + type: doc + id: 3 + body: { "integer_range" : { "gte": 4, "lte": 5 } } + + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "integer_range" : { "gte": 3, "lte" : 4 } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "integer_range" : { "gte": 3, "lte" : 4, "relation": "intersects" } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "integer_range" : { "gte": 3, "lte" : 4, "relation": "contains" } } } } + + - match: { hits.total: 1 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "integer_range" : { "gte": 3, "lte" : 4, "relation": "within" } } } } + + - match: { hits.total: 0 } + +--- +"Long range": + + - do: + index: + index: test + type: doc + id: 1 + body: { "long_range" : { "gte": 1, "lte": 5 } } + + - do: + index: + index: test + type: doc + id: 2 + body: { "long_range" : { "gte": 1, "lte": 3 } } + + - do: + index: + index: test + type: doc + id: 3 + body: { "long_range" : { "gte": 4, "lte": 5 } } + + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "long_range" : { "gte": 3, "lte" : 4 } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "long_range" : { "gte": 3, "lte" : 4, "relation": "intersects" } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "long_range" : { "gte": 3, "lte" : 4, "relation": "contains" } } } } + + - match: { hits.total: 1 } + + - do: + search: + body: { 
"size" : 0, "query" : { "range" : { "long_range" : { "gte": 3, "lte" : 4, "relation": "within" } } } } + + - match: { hits.total: 0 } + +--- +"Float range": + + - do: + index: + index: test + type: doc + id: 1 + body: { "float_range" : { "gte": 1, "lte": 5 } } + + - do: + index: + index: test + type: doc + id: 2 + body: { "float_range" : { "gte": 1, "lte": 3 } } + + - do: + index: + index: test + type: doc + id: 3 + body: { "float_range" : { "gte": 4, "lte": 5 } } + + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "float_range" : { "gte": 3, "lte" : 4 } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "float_range" : { "gte": 3, "lte" : 4, "relation": "intersects" } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "float_range" : { "gte": 3, "lte" : 4, "relation": "contains" } } } } + + - match: { hits.total: 1 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "float_range" : { "gte": 3, "lte" : 4, "relation": "within" } } } } + + - match: { hits.total: 0 } + +--- +"Double range": + + - do: + index: + index: test + type: doc + id: 1 + body: { "double_range" : { "gte": 1, "lte": 5 } } + + - do: + index: + index: test + type: doc + id: 2 + body: { "double_range" : { "gte": 1, "lte": 3 } } + + - do: + index: + index: test + type: doc + id: 3 + body: { "double_range" : { "gte": 4, "lte": 5 } } + + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "double_range" : { "gte": 3, "lte" : 4 } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "double_range" : { "gte": 3, "lte" : 4, "relation": "intersects" } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "double_range" : { "gte": 3, "lte" : 4, "relation": "contains" } } } } + + - match: { hits.total: 1 
} + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "double_range" : { "gte": 3, "lte" : 4, "relation": "within" } } } } + + - match: { hits.total: 0 } + +--- +"IP range": + + - do: + index: + index: test + type: doc + id: 1 + body: { "ip_range" : { "gte": "192.168.0.1", "lte": "192.168.0.5" } } + + - do: + index: + index: test + type: doc + id: 2 + body: { "ip_range" : { "gte": "192.168.0.1", "lte": "192.168.0.3" } } + + - do: + index: + index: test + type: doc + id: 3 + body: { "ip_range" : { "gte": "192.168.0.4", "lte": "192.168.0.5" } } + + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "ip_range" : { "gte": "192.168.0.3", "lte" : "192.168.0.4" } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "ip_range" : { "gte": "192.168.0.3", "lte" : "192.168.0.4", "relation": "intersects" } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "ip_range" : { "gte": "192.168.0.3", "lte" : "192.168.0.4", "relation": "contains" } } } } + + - match: { hits.total: 1 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "ip_range" : { "gte": "192.168.0.3", "lte" : "192.168.0.4", "relation": "within" } } } } + + - match: { hits.total: 0 } + +--- +"Date range": + + - do: + index: + index: test + type: doc + id: 1 + body: { "date_range" : { "gte": "2017-09-01", "lte": "2017-09-05" } } + + - do: + index: + index: test + type: doc + id: 2 + body: { "date_range" : { "gte": "2017-09-01", "lte": "2017-09-03" } } + + - do: + index: + index: test + type: doc + id: 3 + body: { "date_range" : { "gte": "2017-09-04", "lte": "2017-09-05" } } + + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "date_range" : { "gte": "2017-09-03", "lte" : "2017-09-04" } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "date_range" 
: { "gte": "2017-09-03", "lte" : "2017-09-04", "relation": "intersects" } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "date_range" : { "gte": "2017-09-03", "lte" : "2017-09-04", "relation": "contains" } } } } + + - match: { hits.total: 1 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "date_range" : { "gte": "2017-09-03", "lte" : "2017-09-04", "relation": "within" } } } } + + - match: { hits.total: 0 } diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/scaled_float/10_basic.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/scaled_float/10_basic.yml new file mode 100644 index 0000000000000..6840d8aae20d6 --- /dev/null +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/scaled_float/10_basic.yml @@ -0,0 +1,105 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + doc: + "properties": + "number": + "type" : "scaled_float" + "scaling_factor": 100 + + - do: + index: + index: test + type: doc + id: 1 + body: { "number" : 1 } + + - do: + index: + index: test + type: doc + id: 2 + body: { "number" : 1.53 } + + - do: + index: + index: test + type: doc + id: 3 + body: { "number" : -2.1 } + + - do: + index: + index: test + type: doc + id: 4 + body: { "number" : 1.53 } + + - do: + indices.refresh: {} + +--- +"Aggregations": + + - do: + search: + body: { "size" : 0, "aggs" : { "my_terms" : { "terms" : { "field" : "number" } } } } + + - match: { hits.total: 4 } + + - length: { aggregations.my_terms.buckets: 3 } + + - match: { aggregations.my_terms.buckets.0.key: 1.53 } + + - is_false: aggregations.my_terms.buckets.0.key_as_string + + - match: { aggregations.my_terms.buckets.0.doc_count: 2 } + + - match: { aggregations.my_terms.buckets.1.key: -2.1 } + + - is_false: aggregations.my_terms.buckets.1.key_as_string + + - match: { aggregations.my_terms.buckets.1.doc_count: 1 } + + - match: { 
aggregations.my_terms.buckets.2.key: 1 } + + - is_false: aggregations.my_terms.buckets.2.key_as_string + + - match: { aggregations.my_terms.buckets.2.doc_count: 1 } + +--- +"Search": + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "number" : { "gte" : -2 } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "number" : { "gte" : 0 } } } } + + - match: { hits.total: 3 } + + - do: + search: + body: { "size" : 0, "query" : { "range" : { "number" : { "lt" : 1.5 } } } } + + - match: { hits.total: 2 } + +--- +"Sort": + + - do: + search: + body: { "size" : 1, "sort" : { "number" : { "order" : "asc" } } } + + - match: { hits.total: 4 } + - match: { hits.hits.0._id: "3" } + diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index cf55368861aef..36b93fd4d866f 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -25,7 +25,16 @@ esplugin { dependencies { // for testing hasChild and hasParent rejections + compile project(path: ':modules:mapper-extras', configuration: 'runtime') testCompile project(path: ':modules:parent-join', configuration: 'runtime') } + +dependencyLicenses { + // Don't check the client's license. We know it. 
+ dependencies = project.configurations.runtime.fileCollection { + it.group.startsWith('org.elasticsearch') == false + } - project.configurations.provided +} + compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes" compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml index 9cc30bbcd1b45..5ac79a898816b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml @@ -20,9 +20,6 @@ setup: type: double number: type: long - scaled_float: - type: scaled_float - scaling_factor: 100 date: type: date @@ -300,56 +297,6 @@ setup: - match: { aggregations.double_terms.buckets.1.doc_count: 1 } ---- -"Scaled float test": - - skip: - version: " - 5.2.0" - reason: scaled_float were considered as longs in aggregations, this was fixed in 5.2.0 - - - do: - index: - index: test_1 - type: test - id: 1 - body: { "scaled_float": 9.99 } - - - do: - index: - index: test_1 - type: test - id: 2 - body: { "scaled_float": 9.994 } - - - do: - index: - index: test_1 - type: test - id: 3 - body: { "scaled_float": 8.99 } - - - do: - indices.refresh: {} - - - do: - search: - body: { "size" : 0, "aggs" : { "scaled_float_terms" : { "terms" : { "field" : "scaled_float" } } } } - - - match: { hits.total: 3 } - - - length: { aggregations.scaled_float_terms.buckets: 2 } - - - match: { aggregations.scaled_float_terms.buckets.0.key: 9.99 } - - - is_false: aggregations.scaled_float_terms.buckets.0.key_as_string - - - match: { aggregations.scaled_float_terms.buckets.0.doc_count: 2 } - - - match: { aggregations.scaled_float_terms.buckets.1.key: 8.99 } - - - is_false: aggregations.scaled_float_terms.buckets.1.key_as_string - - - match: { 
aggregations.scaled_float_terms.buckets.1.doc_count: 1 } - --- "Date test": - do: diff --git a/settings.gradle b/settings.gradle index 220dda259b221..698f5600684bb 100644 --- a/settings.gradle +++ b/settings.gradle @@ -35,6 +35,7 @@ List projects = [ 'modules:lang-expression', 'modules:lang-mustache', 'modules:lang-painless', + 'modules:mapper-extras', 'modules:parent-join', 'modules:percolator', 'modules:reindex', @@ -65,6 +66,7 @@ List projects = [ 'qa:auto-create-index', 'qa:evil-tests', 'qa:full-cluster-restart', + 'qa:integration-bwc', 'qa:mixed-cluster', 'qa:multi-cluster-search', 'qa:no-bootstrap-tests', diff --git a/core/src/test/java/org/elasticsearch/index/mapper/AbstractNumericFieldMapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractNumericFieldMapperTestCase.java similarity index 100% rename from core/src/test/java/org/elasticsearch/index/mapper/AbstractNumericFieldMapperTestCase.java rename to test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractNumericFieldMapperTestCase.java diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index c38b6b759de5b..d56db722def82 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -137,19 +137,17 @@ public abstract class AbstractQueryTestCase> public static final String STRING_FIELD_NAME = "mapped_string"; protected static final String STRING_FIELD_NAME_2 = "mapped_string_2"; protected static final String INT_FIELD_NAME = "mapped_int"; - protected static final String INT_RANGE_FIELD_NAME = "mapped_int_range"; protected static final String DOUBLE_FIELD_NAME = "mapped_double"; protected static final String BOOLEAN_FIELD_NAME = "mapped_boolean"; protected static final String DATE_FIELD_NAME = "mapped_date"; - protected static 
final String DATE_RANGE_FIELD_NAME = "mapped_date_range"; protected static final String OBJECT_FIELD_NAME = "mapped_object"; protected static final String GEO_POINT_FIELD_NAME = "mapped_geo_point"; protected static final String GEO_SHAPE_FIELD_NAME = "mapped_geo_shape"; - protected static final String[] MAPPED_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME, - DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, + protected static final String[] MAPPED_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, + DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, GEO_SHAPE_FIELD_NAME}; - private static final String[] MAPPED_LEAF_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME, - DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, GEO_POINT_FIELD_NAME, }; + private static final String[] MAPPED_LEAF_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, + DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, GEO_POINT_FIELD_NAME, }; private static final int NUMBER_OF_TESTQUERIES = 20; protected static Version indexVersionCreated; @@ -1078,11 +1076,9 @@ public void onRemoval(ShardId shardId, Accountable accountable) { STRING_FIELD_NAME, "type=text", STRING_FIELD_NAME_2, "type=keyword", INT_FIELD_NAME, "type=integer", - INT_RANGE_FIELD_NAME, "type=integer_range", DOUBLE_FIELD_NAME, "type=double", BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", - DATE_RANGE_FIELD_NAME, "type=date_range", OBJECT_FIELD_NAME, "type=object", GEO_POINT_FIELD_NAME, "type=geo_point", GEO_SHAPE_FIELD_NAME, "type=geo_shape" From 7be5ee5f28e715854d64f116bc8352bf953f2981 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 13 Sep 2017 12:15:15 -0400 Subject: [PATCH 25/67] Initialize checkpoint tracker with allocation ID This commit pushes the allocation ID down 
through to the global checkpoint tracker at construction rather than when activated as a primary. Relates #26630 --- .../elasticsearch/index/engine/Engine.java | 2 + .../index/engine/EngineConfig.java | 13 +++- .../index/engine/InternalEngine.java | 4 +- .../index/seqno/GlobalCheckpointTracker.java | 12 ++- .../index/seqno/SequenceNumbersService.java | 5 +- .../elasticsearch/index/shard/IndexShard.java | 2 +- .../index/engine/InternalEngineTests.java | 34 ++++---- .../seqno/GlobalCheckpointTrackerTests.java | 77 +++++++++++-------- .../index/shard/RefreshListenersTests.java | 9 ++- 9 files changed, 99 insertions(+), 59 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 9fd46d53049f2..9b304de6077fc 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -97,6 +97,7 @@ public abstract class Engine implements Closeable { public static final String SYNC_COMMIT_ID = "sync_id"; protected final ShardId shardId; + protected final String allocationId; protected final Logger logger; protected final EngineConfig engineConfig; protected final Store store; @@ -126,6 +127,7 @@ protected Engine(EngineConfig engineConfig) { this.engineConfig = engineConfig; this.shardId = engineConfig.getShardId(); + this.allocationId = engineConfig.getAllocationId(); this.store = engineConfig.getStore(); this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name engineConfig.getIndexSettings().getSettings(), engineConfig.getShardId()); diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index d7019c77321da..66911ab80c723 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ 
b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -51,6 +51,7 @@ */ public final class EngineConfig { private final ShardId shardId; + private final String allocationId; private final IndexSettings indexSettings; private final ByteSizeValue indexingBufferSize; private volatile boolean enableGcDeletes = true; @@ -109,7 +110,7 @@ public final class EngineConfig { /** * Creates a new {@link org.elasticsearch.index.engine.EngineConfig} */ - public EngineConfig(OpenMode openMode, ShardId shardId, ThreadPool threadPool, + public EngineConfig(OpenMode openMode, ShardId shardId, String allocationId, ThreadPool threadPool, IndexSettings indexSettings, Engine.Warmer warmer, Store store, MergePolicy mergePolicy, Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.EventListener eventListener, @@ -120,6 +121,7 @@ public EngineConfig(OpenMode openMode, ShardId shardId, ThreadPool threadPool, throw new IllegalArgumentException("openMode must not be null"); } this.shardId = shardId; + this.allocationId = allocationId; this.indexSettings = indexSettings; this.threadPool = threadPool; this.warmer = warmer == null ? (a) -> {} : warmer; @@ -240,6 +242,15 @@ public IndexSettings getIndexSettings() { */ public ShardId getShardId() { return shardId; } + /** + * Returns the allocation ID for the shard. 
+ * + * @return the allocation ID + */ + public String getAllocationId() { + return allocationId; + } + /** * Returns the analyzer as the default analyzer in the engines {@link org.apache.lucene.index.IndexWriter} */ diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 4bd4634a8cb09..e1bf949f50eab 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -192,7 +192,7 @@ public InternalEngine(EngineConfig engineConfig) throws EngineException { throw new IllegalArgumentException(openMode.toString()); } logger.trace("recovered [{}]", seqNoStats); - seqNoService = sequenceNumberService(shardId, engineConfig.getIndexSettings(), seqNoStats); + seqNoService = sequenceNumberService(shardId, allocationId, engineConfig.getIndexSettings(), seqNoStats); updateMaxUnsafeAutoIdTimestampFromWriter(writer); indexWriter = writer; translog = openTranslog(engineConfig, writer, translogDeletionPolicy, () -> seqNoService().getGlobalCheckpoint()); @@ -283,10 +283,12 @@ private void updateMaxUnsafeAutoIdTimestampFromWriter(IndexWriter writer) { private static SequenceNumbersService sequenceNumberService( final ShardId shardId, + final String allocationId, final IndexSettings indexSettings, final SeqNoStats seqNoStats) { return new SequenceNumbersService( shardId, + allocationId, indexSettings, seqNoStats.getMaxSeqNo(), seqNoStats.getLocalCheckpoint(), diff --git a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java index 4df58bcab4459..4d9c493540280 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java @@ -50,6 +50,8 @@ */ public class 
GlobalCheckpointTracker extends AbstractIndexShardComponent { + private final String allocationId; + /** * The global checkpoint tracker can operate in two modes: * - primary: this shard is in charge of collecting local checkpoint information from all shard copies and computing the global @@ -245,12 +247,18 @@ private boolean invariant() { * {@link SequenceNumbers#UNASSIGNED_SEQ_NO}. * * @param shardId the shard ID + * @param allocationId the allocation ID * @param indexSettings the index settings * @param globalCheckpoint the last known global checkpoint for this shard, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} */ - GlobalCheckpointTracker(final ShardId shardId, final IndexSettings indexSettings, final long globalCheckpoint) { + GlobalCheckpointTracker( + final ShardId shardId, + final String allocationId, + final IndexSettings indexSettings, + final long globalCheckpoint) { super(shardId, indexSettings); assert globalCheckpoint >= SequenceNumbers.UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint; + this.allocationId = allocationId; this.primaryMode = false; this.handoffInProgress = false; this.appliedClusterStateVersion = -1L; @@ -310,7 +318,7 @@ public synchronized void updateGlobalCheckpointOnReplica(final long globalCheckp /** * Initializes the global checkpoint tracker in primary mode (see {@link #primaryMode}. Called on primary activation or promotion. 
*/ - public synchronized void activatePrimaryMode(final String allocationId, final long localCheckpoint) { + public synchronized void activatePrimaryMode(final long localCheckpoint) { assert invariant(); assert primaryMode == false; assert localCheckpoints.get(allocationId) != null && localCheckpoints.get(allocationId).inSync && diff --git a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java index 44ad8db39a2a6..2c4286e6e5798 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java @@ -54,13 +54,14 @@ public class SequenceNumbersService extends AbstractIndexShardComponent { */ public SequenceNumbersService( final ShardId shardId, + final String allocationId, final IndexSettings indexSettings, final long maxSeqNo, final long localCheckpoint, final long globalCheckpoint) { super(shardId, indexSettings); localCheckpointTracker = new LocalCheckpointTracker(indexSettings, maxSeqNo, localCheckpoint); - globalCheckpointTracker = new GlobalCheckpointTracker(shardId, indexSettings, globalCheckpoint); + globalCheckpointTracker = new GlobalCheckpointTracker(shardId, allocationId, indexSettings, globalCheckpoint); } /** @@ -201,7 +202,7 @@ public synchronized long getTrackedLocalCheckpointForShard(final String allocati * Called on primary activation or promotion. 
*/ public void activatePrimaryMode(final String allocationId, final long localCheckpoint) { - globalCheckpointTracker.activatePrimaryMode(allocationId, localCheckpoint); + globalCheckpointTracker.activatePrimaryMode(localCheckpoint); } /** diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 22d6ba20be2cd..9845064c4df4a 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -2074,7 +2074,7 @@ private DocumentMapperForType docMapper(String type) { private EngineConfig newEngineConfig(EngineConfig.OpenMode openMode) { Sort indexSort = indexSortSupplier.get(); - return new EngineConfig(openMode, shardId, + return new EngineConfig(openMode, shardId, shardRouting.allocationId().getId(), threadPool, indexSettings, warmer, store, indexSettings.getMergePolicy(), mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, indexCache.query(), cachingPolicy, translogConfig, diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index c75926bbc0171..8d948846fbece 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -73,6 +73,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; @@ -80,6 +81,7 @@ import org.elasticsearch.common.Nullable; import 
org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -198,6 +200,7 @@ public class InternalEngineTests extends ESTestCase { protected final ShardId shardId = new ShardId(new Index("index", "_na_"), 0); + protected final AllocationId allocationId = AllocationId.newInitializing(); private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY); protected ThreadPool threadPool; @@ -264,11 +267,11 @@ public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode) { } public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode, Analyzer analyzer) { - return new EngineConfig(openMode, config.getShardId(), config.getThreadPool(), config.getIndexSettings(), config.getWarmer(), - config.getStore(), config.getMergePolicy(), analyzer, config.getSimilarity(), - new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), - config.getQueryCachingPolicy(), config.getTranslogConfig(), - config.getFlushMergesAfter(), config.getRefreshListeners(), config.getIndexSort(), config.getTranslogRecoveryRunner()); + return new EngineConfig(openMode, config.getShardId(), config.getAllocationId(), config.getThreadPool(), config.getIndexSettings(), + config.getWarmer(), config.getStore(), config.getMergePolicy(), analyzer, config.getSimilarity(), + new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), + config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners(), config.getIndexSort(), + config.getTranslogRecoveryRunner()); } @Override @@ -447,7 +450,7 @@ public void onFailedEngine(String reason, @Nullable Exception e) { indexSettings.getSettings())); final List refreshListenerList = 
refreshListener == null ? emptyList() : Collections.singletonList(refreshListener); - EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, + EngineConfig config = new EngineConfig(openMode, shardId, allocationId.getId(), threadPool, indexSettings, null, store, mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), refreshListenerList, indexSort, handler); @@ -728,6 +731,7 @@ public void testCommitStats() throws IOException { Store store = createStore(); InternalEngine engine = createEngine(store, createTempDir(), (config) -> new SequenceNumbersService( config.getShardId(), + config.getAllocationId(), config.getIndexSettings(), maxSeqNo.get(), localCheckpoint.get(), @@ -901,6 +905,7 @@ public void testTranslogRecoveryWithMultipleGenerations() throws IOException { initialEngine = createEngine(store, createTempDir(), (config) -> new SequenceNumbersService( config.getShardId(), + config.getAllocationId(), config.getIndexSettings(), SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, @@ -2028,7 +2033,7 @@ public void testSeqNoAndCheckpoints() throws IOException { try { initialEngine = engine; - final ShardRouting primary = TestShardRouting.newShardRouting(shardId, "node1", true, ShardRoutingState.STARTED); + final ShardRouting primary = TestShardRouting.newShardRouting("test", shardId.id(), "node1", null, true, ShardRoutingState.STARTED, allocationId); final ShardRouting replica = TestShardRouting.newShardRouting(shardId, "node2", false, ShardRoutingState.STARTED); initialEngine.seqNoService().updateAllocationIdsFromMaster(1L, new HashSet<>(Arrays.asList(primary.allocationId().getId(), replica.allocationId().getId())), @@ -2788,12 +2793,11 @@ public void testRecoverFromForeignTranslog() throws IOException { TranslogConfig translogConfig = new 
TranslogConfig(shardId, translog.location(), config.getIndexSettings(), BigArrays.NON_RECYCLING_INSTANCE); - EngineConfig brokenConfig = new EngineConfig(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, shardId, threadPool, - config.getIndexSettings(), null, store, newMergePolicy(), config.getAnalyzer(), - config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), - IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, - TimeValue.timeValueMinutes(5), config.getRefreshListeners(), null, - config.getTranslogRecoveryRunner()); + EngineConfig brokenConfig = new EngineConfig(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, shardId, allocationId.getId(), + threadPool, config.getIndexSettings(), null, store, newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), + new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), config.getRefreshListeners(), + null, config.getTranslogRecoveryRunner()); try { InternalEngine internalEngine = new InternalEngine(brokenConfig); @@ -3628,6 +3632,7 @@ private SequenceNumbersService getStallingSeqNoService( final AtomicLong expectedLocalCheckpoint) { return new SequenceNumbersService( shardId, + allocationId.getId(), defaultSettings, SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, @@ -3839,7 +3844,7 @@ public void testNoOps() throws IOException { final int globalCheckpoint = randomIntBetween(0, localCheckpoint); try { final SequenceNumbersService seqNoService = - new SequenceNumbersService(shardId, defaultSettings, maxSeqNo, localCheckpoint, globalCheckpoint) { + new SequenceNumbersService(shardId, allocationId.getId(), defaultSettings, maxSeqNo, localCheckpoint, globalCheckpoint) { @Override public long generateSeqNo() { throw new UnsupportedOperationException(); @@ -3986,6 +3991,7 @@ public void 
testRestoreLocalCheckpointFromTranslog() throws IOException { final SequenceNumbersService seqNoService = new SequenceNumbersService( shardId, + allocationId.getId(), defaultSettings, SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, diff --git a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java index 8d53c69e2713e..db686248ff2cc 100644 --- a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java +++ b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java @@ -25,11 +25,13 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -46,7 +48,6 @@ import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -61,25 +62,11 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { - GlobalCheckpointTracker tracker; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - tracker = - new GlobalCheckpointTracker( - new ShardId("test", "_na_", 0), - IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), - 
UNASSIGNED_SEQ_NO); - } - public void testEmptyShards() { + final GlobalCheckpointTracker tracker = newTracker(AllocationId.newInitializing()); assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); } - private final AtomicInteger aIdGenerator = new AtomicInteger(); - private Map randomAllocationsWithLocalCheckpoints(int min, int max) { Map allocations = new HashMap<>(); for (int i = randomIntBetween(min, max); i > 0; i--) { @@ -117,6 +104,7 @@ public void testGlobalCheckpointUpdate() { // it is however nice not to assume this on this level and check we do the right thing. final long minLocalCheckpoint = allocations.values().stream().min(Long::compare).orElse(UNASSIGNED_SEQ_NO); + final GlobalCheckpointTracker tracker = newTracker(active.iterator().next()); assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); logger.info("--> using allocations"); @@ -133,7 +121,7 @@ public void testGlobalCheckpointUpdate() { }); tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing), emptySet()); - tracker.activatePrimaryMode(active.iterator().next().getId(), NO_OPS_PERFORMED); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); initializing.forEach(aId -> markAllocationIdAsInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED)); allocations.keySet().forEach(aId -> tracker.updateLocalCheckpoint(aId.getId(), allocations.get(aId))); @@ -179,9 +167,10 @@ public void testMissingActiveIdsPreventAdvance() { final Map assigned = new HashMap<>(); assigned.putAll(active); assigned.putAll(initializing); - tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet()), emptySet()); AllocationId primary = active.keySet().iterator().next(); - tracker.activatePrimaryMode(primary.getId(), NO_OPS_PERFORMED); + final GlobalCheckpointTracker tracker = newTracker(primary); + tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet()), 
emptySet()); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); randomSubsetOf(initializing.keySet()).forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); final AllocationId missingActiveID = randomFrom(active.keySet()); assigned @@ -202,9 +191,11 @@ public void testMissingInSyncIdsPreventAdvance() { final Map active = randomAllocationsWithLocalCheckpoints(1, 5); final Map initializing = randomAllocationsWithLocalCheckpoints(2, 5); logger.info("active: {}, initializing: {}", active, initializing); - tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet()), emptySet()); + AllocationId primary = active.keySet().iterator().next(); - tracker.activatePrimaryMode(primary.getId(), NO_OPS_PERFORMED); + final GlobalCheckpointTracker tracker = newTracker(primary); + tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet()), emptySet()); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); randomSubsetOf(randomIntBetween(1, initializing.size() - 1), initializing.keySet()).forEach(aId -> markAllocationIdAsInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED)); @@ -221,8 +212,9 @@ public void testInSyncIdsAreIgnoredIfNotValidatedByMaster() { final Map active = randomAllocationsWithLocalCheckpoints(1, 5); final Map initializing = randomAllocationsWithLocalCheckpoints(1, 5); final Map nonApproved = randomAllocationsWithLocalCheckpoints(1, 5); + final GlobalCheckpointTracker tracker = newTracker(active.keySet().iterator().next()); tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet()), emptySet()); - tracker.activatePrimaryMode(active.keySet().iterator().next().getId(), NO_OPS_PERFORMED); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); initializing.keySet().forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); nonApproved.keySet().forEach(k -> 
expectThrows(IllegalStateException.class, () -> markAllocationIdAsInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED))); @@ -251,8 +243,9 @@ public void testInSyncIdsAreRemovedIfNotValidatedByMaster() { if (randomBoolean()) { allocations.putAll(initializingToBeRemoved); } + final GlobalCheckpointTracker tracker = newTracker(active.iterator().next()); tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing), emptySet()); - tracker.activatePrimaryMode(active.iterator().next().getId(), NO_OPS_PERFORMED); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); if (randomBoolean()) { initializingToStay.keySet().forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); } else { @@ -286,9 +279,10 @@ public void testWaitForAllocationIdToBeInSync() throws Exception { final AtomicBoolean complete = new AtomicBoolean(); final AllocationId inSyncAllocationId = AllocationId.newInitializing(); final AllocationId trackingAllocationId = AllocationId.newInitializing(); + final GlobalCheckpointTracker tracker = newTracker(inSyncAllocationId); tracker.updateFromMaster(randomNonNegativeLong(), Collections.singleton(inSyncAllocationId.getId()), routingTable(Collections.singleton(trackingAllocationId)), emptySet()); - tracker.activatePrimaryMode(inSyncAllocationId.getId(), globalCheckpoint); + tracker.activatePrimaryMode(globalCheckpoint); final Thread thread = new Thread(() -> { try { // synchronize starting with the test thread @@ -326,6 +320,14 @@ public void testWaitForAllocationIdToBeInSync() throws Exception { thread.join(); } + private GlobalCheckpointTracker newTracker(final AllocationId allocationId) { + return new GlobalCheckpointTracker( + new ShardId("test", "_na_", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + UNASSIGNED_SEQ_NO); + } + public void testWaitForAllocationIdToBeInSyncCanBeInterrupted() throws BrokenBarrierException, InterruptedException { final int 
localCheckpoint = randomIntBetween(1, 32); final int globalCheckpoint = randomIntBetween(localCheckpoint + 1, 64); @@ -333,9 +335,10 @@ public void testWaitForAllocationIdToBeInSyncCanBeInterrupted() throws BrokenBar final AtomicBoolean interrupted = new AtomicBoolean(); final AllocationId inSyncAllocationId = AllocationId.newInitializing(); final AllocationId trackingAllocationId = AllocationId.newInitializing(); + final GlobalCheckpointTracker tracker = newTracker(inSyncAllocationId); tracker.updateFromMaster(randomNonNegativeLong(), Collections.singleton(inSyncAllocationId.getId()), routingTable(Collections.singleton(trackingAllocationId)), emptySet()); - tracker.activatePrimaryMode(inSyncAllocationId.getId(), globalCheckpoint); + tracker.activatePrimaryMode(globalCheckpoint); final Thread thread = new Thread(() -> { try { // synchronize starting with the test thread @@ -380,9 +383,10 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { final Set activeAllocationIds = activeAndInitializingAllocationIds.v1(); final Set initializingIds = activeAndInitializingAllocationIds.v2(); IndexShardRoutingTable routingTable = routingTable(initializingIds); - tracker.updateFromMaster(initialClusterStateVersion, ids(activeAllocationIds), routingTable, emptySet()); AllocationId primaryId = activeAllocationIds.iterator().next(); - tracker.activatePrimaryMode(primaryId.getId(), NO_OPS_PERFORMED); + final GlobalCheckpointTracker tracker = newTracker(primaryId); + tracker.updateFromMaster(initialClusterStateVersion, ids(activeAllocationIds), routingTable, emptySet()); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds))); assertThat(tracker.getReplicationGroup().getRoutingTable(), equalTo(routingTable)); @@ -529,9 +533,10 @@ public void testRaceUpdatingGlobalCheckpoint() throws InterruptedException, Brok final CyclicBarrier barrier = new CyclicBarrier(4); final int 
activeLocalCheckpoint = randomIntBetween(0, Integer.MAX_VALUE - 1); + final GlobalCheckpointTracker tracker = newTracker(active); tracker.updateFromMaster(randomNonNegativeLong(), Collections.singleton(active.getId()), routingTable(Collections.singleton(initializing)), emptySet()); - tracker.activatePrimaryMode(active.getId(), activeLocalCheckpoint); + tracker.activatePrimaryMode(activeLocalCheckpoint); final int nextActiveLocalCheckpoint = randomIntBetween(activeLocalCheckpoint + 1, Integer.MAX_VALUE); final Thread activeThread = new Thread(() -> { try { @@ -574,12 +579,15 @@ public void testRaceUpdatingGlobalCheckpoint() throws InterruptedException, Brok } public void testPrimaryContextHandoff() throws IOException { - GlobalCheckpointTracker oldPrimary = new GlobalCheckpointTracker(new ShardId("test", "_na_", 0), - IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), UNASSIGNED_SEQ_NO); - GlobalCheckpointTracker newPrimary = new GlobalCheckpointTracker(new ShardId("test", "_na_", 0), - IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), UNASSIGNED_SEQ_NO); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", Settings.EMPTY); + final ShardId shardId = new ShardId("test", "_na_", 0); FakeClusterState clusterState = initialState(); + GlobalCheckpointTracker oldPrimary = + new GlobalCheckpointTracker(shardId, randomFrom(ids(clusterState.inSyncIds)), indexSettings, UNASSIGNED_SEQ_NO); + GlobalCheckpointTracker newPrimary = + new GlobalCheckpointTracker(shardId, UUIDs.randomBase64UUID(random()), indexSettings, UNASSIGNED_SEQ_NO); + clusterState.apply(oldPrimary); clusterState.apply(newPrimary); @@ -686,9 +694,10 @@ public void testPrimaryContextHandoff() throws IOException { public void testIllegalStateExceptionIfUnknownAllocationId() { final AllocationId active = AllocationId.newInitializing(); final AllocationId initializing = AllocationId.newInitializing(); + final GlobalCheckpointTracker tracker = newTracker(active); 
tracker.updateFromMaster(randomNonNegativeLong(), Collections.singleton(active.getId()), routingTable(Collections.singleton(initializing)), emptySet()); - tracker.activatePrimaryMode(active.getId(), NO_OPS_PERFORMED); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(randomAlphaOfLength(10))); expectThrows(IllegalStateException.class, () -> tracker.markAllocationIdAsInSync(randomAlphaOfLength(10), randomNonNegativeLong())); @@ -731,7 +740,7 @@ private static FakeClusterState initialState() { } private static void activatePrimary(FakeClusterState clusterState, GlobalCheckpointTracker gcp) { - gcp.activatePrimaryMode(randomFrom(ids(clusterState.inSyncIds)), randomIntBetween(Math.toIntExact(NO_OPS_PERFORMED), 10)); + gcp.activatePrimaryMode(randomIntBetween(Math.toIntExact(NO_OPS_PERFORMED), 10)); } private static void randomLocalCheckpointUpdate(GlobalCheckpointTracker gcp) { diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 6b5bd57aed9c2..01893a99ae4e3 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; @@ -98,6 +99,7 @@ public void setupListeners() throws Exception { threadPool = new TestThreadPool(getTestName()); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY); ShardId shardId = new ShardId(new Index("index", "_na_"), 1); + String allocationId = UUIDs.randomBase64UUID(random()); 
Directory directory = newDirectory(); DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { @Override @@ -115,10 +117,9 @@ public void onFailedEngine(String reason, @Nullable Exception e) { // we don't need to notify anybody in this test } }; - EngineConfig config = new EngineConfig(EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, shardId, threadPool, indexSettings, null, - store, newMergePolicy(), iwc.getAnalyzer(), - iwc.getSimilarity(), new CodecService(null, logger), eventListener, - IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, + EngineConfig config = new EngineConfig(EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, shardId, allocationId, threadPool, + indexSettings, null, store, newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), + eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), Collections.singletonList(listeners), null, null); engine = new InternalEngine(config); listeners.setTranslog(engine.getTranslog()); From 2eaf7534f322b25ed4d80f985ebbbe32a5ead83e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 13 Sep 2017 18:21:15 +0200 Subject: [PATCH 26/67] [Tests] Removing skipping tests in search rest tests After backporting the script_field soft limit to the 6.x branches, this test can now also run in a mixed cluster. Relates to #26598 enter the commit message for your changes. 
Lines starting --- .../java/org/elasticsearch/index/IndexSettings.java | 2 +- .../rest-api-spec/test/search/30_limits.yml | 13 ++----------- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index 20c23e3becddc..29395088b3ca3 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -101,7 +101,7 @@ public final class IndexSettings { /** * Index setting describing the maximum value of allowed `script_fields`that can be retrieved - * per search request. The default maximum of 50 is defensive for the reason that retrieving + * per search request. The default maximum of 32 is defensive for the reason that retrieving * script fields is a costly operation. */ public static final Setting MAX_SCRIPT_FIELDS_SETTING = diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml index b0b5e5ffede16..3ee998224522c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml @@ -5,6 +5,7 @@ setup: body: settings: index.max_docvalue_fields_search: 2 + index.max_script_fields: 2 - do: index: @@ -72,21 +73,11 @@ setup: --- "Script_fields size limit": - - skip: - version: " - 6.99.99" - reason: soft limit for script_fields only available as of 7.0.0 - - - do: - indices.create: - index: test_2 - body: - settings: - index.max_script_fields: 2 - do: catch: /Trying to retrieve too many script_fields\. Must be less than or equal to[:] \[2\] but was \[3\]\. 
This limit can be set by changing the \[index.max_script_fields\] index level setting\./ search: - index: test_2 + index: test_1 body: query: match_all: {} From b4de2a6f288e1416bdff14bd22b13b17e0863f4b Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 13 Sep 2017 22:14:17 +0200 Subject: [PATCH 27/67] Add BootstrapContext to expose settings and recovered state to bootstrap checks (#26628) This exposes the node settings and the persistent part of the cluster state to the bootstrap checks to allow plugins to enforce certain preconditions based on the recovered state. --- .../elasticsearch/bootstrap/Bootstrap.java | 4 +- .../bootstrap/BootstrapCheck.java | 4 +- .../bootstrap/BootstrapChecks.java | 71 +++++------ .../bootstrap/BootstrapContext.java | 41 +++++++ .../gateway/GatewayMetaState.java | 3 +- .../gateway/MetaStateService.java | 2 +- .../java/org/elasticsearch/node/Node.java | 25 +++- .../node/NodeValidationException.java | 4 +- .../bootstrap/BootstrapChecksTests.java | 110 ++++++++++-------- .../org/elasticsearch/node/NodeTests.java | 5 +- .../bootstrap/EvilBootstrapChecksTests.java | 9 +- 11 files changed, 171 insertions(+), 107 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/bootstrap/BootstrapContext.java diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 840a62e5e916f..30b9fb7e28dd0 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -212,9 +212,9 @@ public void run() { node = new Node(environment) { @Override protected void validateNodeBeforeAcceptingRequests( - final Settings settings, + final BootstrapContext context, final BoundTransportAddress boundTransportAddress, List checks) throws NodeValidationException { - BootstrapChecks.check(settings, boundTransportAddress, checks); + BootstrapChecks.check(context, boundTransportAddress, checks); 
} }; } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java index ffe52dfe5b957..a2620b2560c0d 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java @@ -27,9 +27,10 @@ public interface BootstrapCheck { /** * Test if the node fails the check. * + * @param context the bootstrap context for more sophisticated checks * @return {@code true} if the node failed the check */ - boolean check(); + boolean check(BootstrapContext context); /** * The error message for a failed check. @@ -41,5 +42,4 @@ public interface BootstrapCheck { default boolean alwaysEnforce() { return false; } - } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index 4adec75ae67a0..b13f36229f9a8 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.DiscoveryModule; @@ -65,18 +64,18 @@ private BootstrapChecks() { * {@code es.enforce.bootstrap.checks} is set to {@code true} then the bootstrap checks will be enforced regardless of whether or not * the transport protocol is bound to a non-loopback interface. 
* - * @param settings the current node settings + * @param context the current node bootstrap context * @param boundTransportAddress the node network bindings */ - static void check(final Settings settings, final BoundTransportAddress boundTransportAddress, List additionalChecks) - throws NodeValidationException { - final List builtInChecks = checks(settings); + static void check(final BootstrapContext context, final BoundTransportAddress boundTransportAddress, + List additionalChecks) throws NodeValidationException { + final List builtInChecks = checks(); final List combinedChecks = new ArrayList<>(builtInChecks); combinedChecks.addAll(additionalChecks); - check( - enforceLimits(boundTransportAddress, DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings)), + check( context, + enforceLimits(boundTransportAddress, DiscoveryModule.DISCOVERY_TYPE_SETTING.get(context.settings)), Collections.unmodifiableList(combinedChecks), - Node.NODE_NAME_SETTING.get(settings)); + Node.NODE_NAME_SETTING.get(context.settings)); } /** @@ -84,15 +83,17 @@ static void check(final Settings settings, final BoundTransportAddress boundTran * property {@code es.enforce.bootstrap.checks} is set to {@code true} then the bootstrap checks will be enforced regardless of whether * or not the transport protocol is bound to a non-loopback interface. 
* + * @param context the current node boostrap context * @param enforceLimits {@code true} if the checks should be enforced or otherwise warned * @param checks the checks to execute * @param nodeName the node name to be used as a logging prefix */ static void check( + final BootstrapContext context, final boolean enforceLimits, final List checks, final String nodeName) throws NodeValidationException { - check(enforceLimits, checks, Loggers.getLogger(BootstrapChecks.class, nodeName)); + check(context, enforceLimits, checks, Loggers.getLogger(BootstrapChecks.class, nodeName)); } /** @@ -100,11 +101,13 @@ static void check( * property {@code es.enforce.bootstrap.checks }is set to {@code true} then the bootstrap checks will be enforced regardless of whether * or not the transport protocol is bound to a non-loopback interface. * + * @param context the current node boostrap context * @param enforceLimits {@code true} if the checks should be enforced or otherwise warned * @param checks the checks to execute * @param logger the logger to */ static void check( + final BootstrapContext context, final boolean enforceLimits, final List checks, final Logger logger) throws NodeValidationException { @@ -134,7 +137,7 @@ static void check( } for (final BootstrapCheck check : checks) { - if (check.check()) { + if (check.check(context)) { if (!(enforceLimits || enforceBootstrapChecks) && !check.alwaysEnforce()) { ignoredErrors.add(check.errorMessage()); } else { @@ -180,13 +183,13 @@ static boolean enforceLimits(final BoundTransportAddress boundTransportAddress, } // the list of checks to execute - static List checks(final Settings settings) { + static List checks() { final List checks = new ArrayList<>(); checks.add(new HeapSizeCheck()); final FileDescriptorCheck fileDescriptorCheck = Constants.MAC_OS_X ? 
new OsXFileDescriptorCheck() : new FileDescriptorCheck(); checks.add(fileDescriptorCheck); - checks.add(new MlockallCheck(BootstrapSettings.MEMORY_LOCK_SETTING.get(settings))); + checks.add(new MlockallCheck()); if (Constants.LINUX) { checks.add(new MaxNumberOfThreadsCheck()); } @@ -201,7 +204,7 @@ static List checks(final Settings settings) { } checks.add(new ClientJvmCheck()); checks.add(new UseSerialGCCheck()); - checks.add(new SystemCallFilterCheck(BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings))); + checks.add(new SystemCallFilterCheck()); checks.add(new OnErrorCheck()); checks.add(new OnOutOfMemoryErrorCheck()); checks.add(new EarlyAccessCheck()); @@ -212,7 +215,7 @@ static List checks(final Settings settings) { static class HeapSizeCheck implements BootstrapCheck { @Override - public boolean check() { + public boolean check(BootstrapContext context) { final long initialHeapSize = getInitialHeapSize(); final long maxHeapSize = getMaxHeapSize(); return initialHeapSize != 0 && maxHeapSize != 0 && initialHeapSize != maxHeapSize; @@ -268,7 +271,7 @@ protected FileDescriptorCheck(final int limit) { this.limit = limit; } - public final boolean check() { + public final boolean check(BootstrapContext context) { final long maxFileDescriptorCount = getMaxFileDescriptorCount(); return maxFileDescriptorCount != -1 && maxFileDescriptorCount < limit; } @@ -292,15 +295,9 @@ long getMaxFileDescriptorCount() { static class MlockallCheck implements BootstrapCheck { - private final boolean mlockallSet; - - MlockallCheck(final boolean mlockAllSet) { - this.mlockallSet = mlockAllSet; - } - @Override - public boolean check() { - return mlockallSet && !isMemoryLocked(); + public boolean check(BootstrapContext context) { + return BootstrapSettings.MEMORY_LOCK_SETTING.get(context.settings) && !isMemoryLocked(); } @Override @@ -321,7 +318,7 @@ static class MaxNumberOfThreadsCheck implements BootstrapCheck { private static final long MAX_NUMBER_OF_THREADS_THRESHOLD = 1 << 
12; @Override - public boolean check() { + public boolean check(BootstrapContext context) { return getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < MAX_NUMBER_OF_THREADS_THRESHOLD; } @@ -345,7 +342,7 @@ long getMaxNumberOfThreads() { static class MaxSizeVirtualMemoryCheck implements BootstrapCheck { @Override - public boolean check() { + public boolean check(BootstrapContext context) { return getMaxSizeVirtualMemory() != Long.MIN_VALUE && getMaxSizeVirtualMemory() != getRlimInfinity(); } @@ -376,7 +373,7 @@ long getMaxSizeVirtualMemory() { static class MaxFileSizeCheck implements BootstrapCheck { @Override - public boolean check() { + public boolean check(BootstrapContext context) { final long maxFileSize = getMaxFileSize(); return maxFileSize != Long.MIN_VALUE && maxFileSize != getRlimInfinity(); } @@ -405,7 +402,7 @@ static class MaxMapCountCheck implements BootstrapCheck { private static final long LIMIT = 1 << 18; @Override - public boolean check() { + public boolean check(BootstrapContext context) { return getMaxMapCount() != -1 && getMaxMapCount() < LIMIT; } @@ -470,7 +467,7 @@ long parseProcSysVmMaxMapCount(final String procSysVmMaxMapCount) throws NumberF static class ClientJvmCheck implements BootstrapCheck { @Override - public boolean check() { + public boolean check(BootstrapContext context) { return getVmName().toLowerCase(Locale.ROOT).contains("client"); } @@ -496,7 +493,7 @@ public String errorMessage() { static class UseSerialGCCheck implements BootstrapCheck { @Override - public boolean check() { + public boolean check(BootstrapContext context) { return getUseSerialGC().equals("true"); } @@ -521,15 +518,9 @@ public String errorMessage() { */ static class SystemCallFilterCheck implements BootstrapCheck { - private final boolean areSystemCallFiltersEnabled; - - SystemCallFilterCheck(final boolean areSystemCallFiltersEnabled) { - this.areSystemCallFiltersEnabled = areSystemCallFiltersEnabled; - } - @Override - public boolean check() { - 
return areSystemCallFiltersEnabled && !isSystemCallFilterInstalled(); + public boolean check(BootstrapContext context) { + return BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(context.settings) && !isSystemCallFilterInstalled(); } // visible for testing @@ -548,7 +539,7 @@ public String errorMessage() { abstract static class MightForkCheck implements BootstrapCheck { @Override - public boolean check() { + public boolean check(BootstrapContext context) { return isSystemCallFilterInstalled() && mightFork(); } @@ -623,7 +614,7 @@ public String errorMessage() { static class EarlyAccessCheck implements BootstrapCheck { @Override - public boolean check() { + public boolean check(BootstrapContext context) { return "Oracle Corporation".equals(jvmVendor()) && javaVersion().endsWith("-ea"); } @@ -651,7 +642,7 @@ public String errorMessage() { static class G1GCCheck implements BootstrapCheck { @Override - public boolean check() { + public boolean check(BootstrapContext context) { if ("Oracle Corporation".equals(jvmVendor()) && isJava8() && isG1GCEnabled()) { final String jvmVersion = jvmVersion(); // HotSpot versions on Java 8 match this regular expression; note that this changes with Java 9 after JEP-223 diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapContext.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapContext.java new file mode 100644 index 0000000000000..f23d0db6d80bf --- /dev/null +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapContext.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.bootstrap; + +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.Settings; + +/** + * Context that is passed to every bootstrap check to make decisions on. + */ +public class BootstrapContext { + /** + * The nodes settings + */ + public final Settings settings; + /** + * The nodes local state metadata loaded on startup + */ + public final MetaData metaData; + + public BootstrapContext(Settings settings, MetaData metaData) { + this.settings = settings; + this.metaData = metaData; + } +} diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 99a51adf96183..9d57392030ce6 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -41,6 +41,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.plugins.MetaDataUpgrader; +import java.io.IOException; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; @@ -114,7 +115,7 @@ public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateSer } } - public MetaData loadMetaState() throws Exception { + public MetaData loadMetaState() throws IOException { return metaStateService.loadFullState(); } diff --git a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 
b900305ab55e1..5c820343cc807 100644 --- a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -53,7 +53,7 @@ public MetaStateService(Settings settings, NodeEnvironment nodeEnv, NamedXConten * Loads the full state, which includes both the global state and all the indices * meta state. */ - MetaData loadFullState() throws Exception { + MetaData loadFullState() throws IOException { MetaData globalMetaData = loadGlobalState(); MetaData.Builder metaDataBuilder; if (globalMetaData != null) { diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 10d8ddcf2105d..cee85c9619912 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -35,6 +35,7 @@ import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.client.Client; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterInfo; @@ -86,6 +87,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.gateway.GatewayMetaState; import org.elasticsearch.gateway.GatewayModule; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.MetaStateService; @@ -139,6 +141,7 @@ import java.io.BufferedWriter; import java.io.Closeable; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.Inet6Address; import java.net.InetAddress; import java.net.InetSocketAddress; @@ -604,7 +607,23 @@ public Node start() throws NodeValidationException { assert localNodeFactory.getNode() != null; assert 
transportService.getLocalNode().equals(localNodeFactory.getNode()) : "transportService has a different local node than the factory provided"; - validateNodeBeforeAcceptingRequests(settings, transportService.boundAddress(), pluginsService.filterPlugins(Plugin.class).stream() + final MetaData onDiskMetadata; + try { + // we load the global state here (the persistent part of the cluster state stored on disk) to + // pass it to the bootstrap checks to allow plugins to enforce certain preconditions based on the recovered state. + if (DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings)) { + onDiskMetadata = injector.getInstance(GatewayMetaState.class).loadMetaState(); + } else { + onDiskMetadata = MetaData.EMPTY_META_DATA; + } + assert onDiskMetadata != null : "metadata is null but shouldn't"; // this is never null + } catch (IOException e) { + throw new UncheckedIOException(e); + } + validateNodeBeforeAcceptingRequests(new BootstrapContext(settings, onDiskMetadata), transportService.boundAddress(), pluginsService + .filterPlugins(Plugin + .class) + .stream() .flatMap(p -> p.getBootstrapChecks().stream()).collect(Collectors.toList())); clusterService.addStateApplier(transportService.getTaskManager()); @@ -811,13 +830,13 @@ public Injector injector() { * and before the network service starts accepting incoming network * requests. 
* - * @param settings the fully-resolved settings + * @param context the bootstrap context for this node * @param boundTransportAddress the network addresses the node is * bound and publishing to */ @SuppressWarnings("unused") protected void validateNodeBeforeAcceptingRequests( - final Settings settings, + final BootstrapContext context, final BoundTransportAddress boundTransportAddress, List bootstrapChecks) throws NodeValidationException { } diff --git a/core/src/main/java/org/elasticsearch/node/NodeValidationException.java b/core/src/main/java/org/elasticsearch/node/NodeValidationException.java index 01840b2556bcf..58e2c4ef951f6 100644 --- a/core/src/main/java/org/elasticsearch/node/NodeValidationException.java +++ b/core/src/main/java/org/elasticsearch/node/NodeValidationException.java @@ -27,8 +27,8 @@ /** * An exception thrown during node validation. Node validation runs immediately before a node * begins accepting network requests in - * {@link Node#validateNodeBeforeAcceptingRequests(Settings, BoundTransportAddress, List)}. This exception is a checked exception that - * is declared as thrown from this method for the purpose of bubbling up to the user. + * {@link Node#validateNodeBeforeAcceptingRequests(org.elasticsearch.bootstrap.BootstrapContext, BoundTransportAddress, List)}. + * This exception is a checked exception that is declared as thrown from this method for the purpose of bubbling up to the user. 
*/ public class NodeValidationException extends Exception { diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java index 77276b8787fc2..a02590c30e0c5 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -51,6 +52,7 @@ import static org.mockito.Mockito.when; public class BootstrapChecksTests extends ESTestCase { + private static final BootstrapContext defaultContext = new BootstrapContext(Settings.EMPTY, MetaData.EMPTY_META_DATA); public void testNonProductionMode() throws NodeValidationException { // nothing should happen since we are in non-production mode @@ -64,18 +66,18 @@ public void testNonProductionMode() throws NodeValidationException { BoundTransportAddress boundTransportAddress = mock(BoundTransportAddress.class); when(boundTransportAddress.boundAddresses()).thenReturn(transportAddresses.toArray(new TransportAddress[0])); when(boundTransportAddress.publishAddress()).thenReturn(publishAddress); - BootstrapChecks.check(Settings.EMPTY, boundTransportAddress, Collections.emptyList()); + BootstrapChecks.check(defaultContext, boundTransportAddress, Collections.emptyList()); } public void testNoLogMessageInNonProductionMode() throws NodeValidationException { final Logger logger = mock(Logger.class); - BootstrapChecks.check(false, Collections.emptyList(), logger); + BootstrapChecks.check(defaultContext, false, Collections.emptyList(), logger); verifyNoMoreInteractions(logger); } public void testLogMessageInProductionMode() 
throws NodeValidationException { final Logger logger = mock(Logger.class); - BootstrapChecks.check(true, Collections.emptyList(), logger); + BootstrapChecks.check(defaultContext, true, Collections.emptyList(), logger); verify(logger).info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks"); verifyNoMoreInteractions(logger); } @@ -126,7 +128,7 @@ public void testExceptionAggregation() { final List checks = Arrays.asList( new BootstrapCheck() { @Override - public boolean check() { + public boolean check(BootstrapContext context) { return true; } @@ -137,7 +139,7 @@ public String errorMessage() { }, new BootstrapCheck() { @Override - public boolean check() { + public boolean check(BootstrapContext context) { return true; } @@ -149,7 +151,8 @@ public String errorMessage() { ); final NodeValidationException e = - expectThrows(NodeValidationException.class, () -> BootstrapChecks.check(true, checks, "testExceptionAggregation")); + expectThrows(NodeValidationException.class, + () -> BootstrapChecks.check(defaultContext, true, checks, "testExceptionAggregation")); assertThat(e, hasToString(allOf(containsString("bootstrap checks failed"), containsString("first"), containsString("second")))); final Throwable[] suppressed = e.getSuppressed(); assertThat(suppressed.length, equalTo(2)); @@ -180,7 +183,7 @@ long getMaxHeapSize() { final NodeValidationException e = expectThrows( NodeValidationException.class, - () -> BootstrapChecks.check(true, Collections.singletonList(check), "testHeapSizeCheck")); + () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testHeapSizeCheck")); assertThat( e.getMessage(), containsString("initial heap size [" + initialHeapSize.get() + "] " + @@ -188,7 +191,7 @@ long getMaxHeapSize() { initialHeapSize.set(maxHeapSize.get()); - BootstrapChecks.check(true, Collections.singletonList(check), "testHeapSizeCheck"); + BootstrapChecks.check(defaultContext, true, 
Collections.singletonList(check), "testHeapSizeCheck"); // nothing should happen if the initial heap size or the max // heap size is not available @@ -197,7 +200,7 @@ long getMaxHeapSize() { } else { maxHeapSize.set(0); } - BootstrapChecks.check(true, Collections.singletonList(check), "testHeapSizeCheck"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testHeapSizeCheck"); } public void testFileDescriptorLimits() throws NodeValidationException { @@ -223,17 +226,17 @@ long getMaxFileDescriptorCount() { final NodeValidationException e = expectThrows(NodeValidationException.class, - () -> BootstrapChecks.check(true, Collections.singletonList(check), "testFileDescriptorLimits")); + () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testFileDescriptorLimits")); assertThat(e.getMessage(), containsString("max file descriptors")); maxFileDescriptorCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE)); - BootstrapChecks.check(true, Collections.singletonList(check), "testFileDescriptorLimits"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testFileDescriptorLimits"); // nothing should happen if current file descriptor count is // not available maxFileDescriptorCount.set(-1); - BootstrapChecks.check(true, Collections.singletonList(check), "testFileDescriptorLimits"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testFileDescriptorLimits"); } public void testFileDescriptorLimitsThrowsOnInvalidLimit() { @@ -266,17 +269,19 @@ class MlockallCheckTestCase { testCases.add(new MlockallCheckTestCase(false, false, false)); for (final MlockallCheckTestCase testCase : testCases) { - final BootstrapChecks.MlockallCheck check = new BootstrapChecks.MlockallCheck(testCase.mlockallSet) { + final BootstrapChecks.MlockallCheck check = new BootstrapChecks.MlockallCheck() { @Override boolean isMemoryLocked() { return testCase.isMemoryLocked; } }; - + 
BootstrapContext bootstrapContext = new BootstrapContext( + Settings.builder().put("bootstrap.memory_lock", testCase.mlockallSet).build(), null); if (testCase.shouldFail) { final NodeValidationException e = expectThrows( NodeValidationException.class, () -> BootstrapChecks.check( + bootstrapContext, true, Collections.singletonList(check), "testFileDescriptorLimitsThrowsOnInvalidLimit")); @@ -285,7 +290,8 @@ boolean isMemoryLocked() { containsString("memory locking requested for elasticsearch process but memory is not locked")); } else { // nothing should happen - BootstrapChecks.check(true, Collections.singletonList(check), "testFileDescriptorLimitsThrowsOnInvalidLimit"); + BootstrapChecks.check(bootstrapContext, true, Collections.singletonList(check), + "testFileDescriptorLimitsThrowsOnInvalidLimit"); } } } @@ -302,17 +308,17 @@ long getMaxNumberOfThreads() { final NodeValidationException e = expectThrows( NodeValidationException.class, - () -> BootstrapChecks.check(true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck")); + () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck")); assertThat(e.getMessage(), containsString("max number of threads")); maxNumberOfThreads.set(randomIntBetween(limit + 1, Integer.MAX_VALUE)); - BootstrapChecks.check(true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck"); // nothing should happen if current max number of threads is // not available maxNumberOfThreads.set(-1); - BootstrapChecks.check(true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck"); } public void testMaxSizeVirtualMemory() throws NodeValidationException { @@ -332,16 +338,16 @@ long getRlimInfinity() { final NodeValidationException e = 
expectThrows( NodeValidationException.class, - () -> BootstrapChecks.check(true, Collections.singletonList(check), "testMaxSizeVirtualMemory")); + () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxSizeVirtualMemory")); assertThat(e.getMessage(), containsString("max size virtual memory")); maxSizeVirtualMemory.set(rlimInfinity); - BootstrapChecks.check(true, Collections.singletonList(check), "testMaxSizeVirtualMemory"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxSizeVirtualMemory"); // nothing should happen if max size virtual memory is not available maxSizeVirtualMemory.set(Long.MIN_VALUE); - BootstrapChecks.check(true, Collections.singletonList(check), "testMaxSizeVirtualMemory"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxSizeVirtualMemory"); } public void testMaxFileSizeCheck() throws NodeValidationException { @@ -361,16 +367,16 @@ long getRlimInfinity() { final NodeValidationException e = expectThrows( NodeValidationException.class, - () -> BootstrapChecks.check(true, Collections.singletonList(check), "testMaxFileSize")); + () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxFileSize")); assertThat(e.getMessage(), containsString("max file size")); maxFileSize.set(rlimInfinity); - BootstrapChecks.check(true, Collections.singletonList(check), "testMaxFileSize"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxFileSize"); // nothing should happen if max file size is not available maxFileSize.set(Long.MIN_VALUE); - BootstrapChecks.check(true, Collections.singletonList(check), "testMaxFileSize"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxFileSize"); } public void testMaxMapCountCheck() throws NodeValidationException { @@ -385,17 +391,17 @@ long getMaxMapCount() { final NodeValidationException e = expectThrows( 
NodeValidationException.class, - () -> BootstrapChecks.check(true, Collections.singletonList(check), "testMaxMapCountCheck")); + () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxMapCountCheck")); assertThat(e.getMessage(), containsString("max virtual memory areas vm.max_map_count")); maxMapCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE)); - BootstrapChecks.check(true, Collections.singletonList(check), "testMaxMapCountCheck"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxMapCountCheck"); // nothing should happen if current vm.max_map_count is not // available maxMapCount.set(-1); - BootstrapChecks.check(true, Collections.singletonList(check), "testMaxMapCountCheck"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxMapCountCheck"); } public void testClientJvmCheck() throws NodeValidationException { @@ -409,14 +415,14 @@ String getVmName() { final NodeValidationException e = expectThrows( NodeValidationException.class, - () -> BootstrapChecks.check(true, Collections.singletonList(check), "testClientJvmCheck")); + () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testClientJvmCheck")); assertThat( e.getMessage(), containsString("JVM is using the client VM [Java HotSpot(TM) 32-Bit Client VM] " + "but should be using a server VM for the best performance")); vmName.set("Java HotSpot(TM) 32-Bit Server VM"); - BootstrapChecks.check(true, Collections.singletonList(check), "testClientJvmCheck"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testClientJvmCheck"); } public void testUseSerialGCCheck() throws NodeValidationException { @@ -430,19 +436,22 @@ String getUseSerialGC() { final NodeValidationException e = expectThrows( NodeValidationException.class, - () -> BootstrapChecks.check(true, Collections.singletonList(check), "testUseSerialGCCheck")); + () -> 
BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testUseSerialGCCheck")); assertThat( e.getMessage(), containsString("JVM is using the serial collector but should not be for the best performance; " + "" + "either it's the default for the VM [" + JvmInfo.jvmInfo().getVmName() +"] or -XX:+UseSerialGC was explicitly specified")); useSerialGC.set("false"); - BootstrapChecks.check(true, Collections.singletonList(check), "testUseSerialGCCheck"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testUseSerialGCCheck"); } public void testSystemCallFilterCheck() throws NodeValidationException { final AtomicBoolean isSystemCallFilterInstalled = new AtomicBoolean(); - final BootstrapChecks.SystemCallFilterCheck systemCallFilterEnabledCheck = new BootstrapChecks.SystemCallFilterCheck(true) { + BootstrapContext context = randomBoolean() ? new BootstrapContext(Settings.builder().put("bootstrap.system_call_filter", true) + .build(), null) : defaultContext; + + final BootstrapChecks.SystemCallFilterCheck systemCallFilterEnabledCheck = new BootstrapChecks.SystemCallFilterCheck() { @Override boolean isSystemCallFilterInstalled() { return isSystemCallFilterInstalled.get(); @@ -451,25 +460,26 @@ boolean isSystemCallFilterInstalled() { final NodeValidationException e = expectThrows( NodeValidationException.class, - () -> BootstrapChecks.check(true, Collections.singletonList(systemCallFilterEnabledCheck), "testSystemCallFilterCheck")); + () -> BootstrapChecks.check(context, true, Collections.singletonList(systemCallFilterEnabledCheck), + "testSystemCallFilterCheck")); assertThat( e.getMessage(), containsString("system call filters failed to install; " + "check the logs and fix your configuration or disable system call filters at your own risk")); isSystemCallFilterInstalled.set(true); - BootstrapChecks.check(true, Collections.singletonList(systemCallFilterEnabledCheck), "testSystemCallFilterCheck"); - - final 
BootstrapChecks.SystemCallFilterCheck systemCallFilterNotEnabledCheck = new BootstrapChecks.SystemCallFilterCheck(false) { + BootstrapChecks.check(context, true, Collections.singletonList(systemCallFilterEnabledCheck), "testSystemCallFilterCheck"); + BootstrapContext context_1 = new BootstrapContext(Settings.builder().put("bootstrap.system_call_filter", false).build(), null); + final BootstrapChecks.SystemCallFilterCheck systemCallFilterNotEnabledCheck = new BootstrapChecks.SystemCallFilterCheck() { @Override boolean isSystemCallFilterInstalled() { return isSystemCallFilterInstalled.get(); } }; isSystemCallFilterInstalled.set(false); - BootstrapChecks.check(true, Collections.singletonList(systemCallFilterNotEnabledCheck), "testSystemCallFilterCheck"); + BootstrapChecks.check(context_1, true, Collections.singletonList(systemCallFilterNotEnabledCheck), "testSystemCallFilterCheck"); isSystemCallFilterInstalled.set(true); - BootstrapChecks.check(true, Collections.singletonList(systemCallFilterNotEnabledCheck), "testSystemCallFilterCheck"); + BootstrapChecks.check(context_1, true, Collections.singletonList(systemCallFilterNotEnabledCheck), "testSystemCallFilterCheck"); } public void testMightForkCheck() throws NodeValidationException { @@ -573,13 +583,13 @@ private void runMightForkTest( } else { enableMightFork.run(); } - BootstrapChecks.check(true, Collections.singletonList(check), methodName); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), methodName); // if system call filter is enabled, but we will not fork, nothing should // happen isSystemCallFilterInstalled.set(true); disableMightFork.run(); - BootstrapChecks.check(true, Collections.singletonList(check), methodName); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), methodName); // if system call filter is enabled, and we might fork, the check should be enforced, regardless of bootstrap checks being enabled // or not @@ -588,7 +598,7 @@ private void 
runMightForkTest( final NodeValidationException e = expectThrows( NodeValidationException.class, - () -> BootstrapChecks.check(randomBoolean(), Collections.singletonList(check), methodName)); + () -> BootstrapChecks.check(defaultContext, randomBoolean(), Collections.singletonList(check), methodName)); consumer.accept(e); } @@ -613,7 +623,7 @@ String javaVersion() { final NodeValidationException e = expectThrows( NodeValidationException.class, () -> { - BootstrapChecks.check(true, checks, "testEarlyAccessCheck"); + BootstrapChecks.check(defaultContext, true, checks, "testEarlyAccessCheck"); }); assertThat( e.getMessage(), @@ -624,7 +634,7 @@ String javaVersion() { // if not on an early-access build, nothing should happen javaVersion.set(randomFrom("1.8.0_152", "9")); - BootstrapChecks.check(true, checks, "testEarlyAccessCheck"); + BootstrapChecks.check(defaultContext, true, checks, "testEarlyAccessCheck"); } @@ -660,7 +670,7 @@ boolean isJava8() { final NodeValidationException e = expectThrows( NodeValidationException.class, - () -> BootstrapChecks.check(true, Collections.singletonList(g1GCCheck), "testG1GCCheck")); + () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(g1GCCheck), "testG1GCCheck")); assertThat( e.getMessage(), containsString( @@ -668,12 +678,12 @@ boolean isJava8() { // if G1GC is disabled, nothing should happen isG1GCEnabled.set(false); - BootstrapChecks.check(true, Collections.singletonList(g1GCCheck), "testG1GCCheck"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(g1GCCheck), "testG1GCCheck"); // if on or after update 40, nothing should happen independent of whether or not G1GC is enabled isG1GCEnabled.set(randomBoolean()); jvmVersion.set(String.format(Locale.ROOT, "25.%d-b%d", randomIntBetween(40, 112), randomIntBetween(1, 128))); - BootstrapChecks.check(true, Collections.singletonList(g1GCCheck), "testG1GCCheck"); + BootstrapChecks.check(defaultContext, true, 
Collections.singletonList(g1GCCheck), "testG1GCCheck"); final BootstrapChecks.G1GCCheck nonOracleCheck = new BootstrapChecks.G1GCCheck() { @@ -685,7 +695,7 @@ String jvmVendor() { }; // if not on an Oracle JVM, nothing should happen - BootstrapChecks.check(true, Collections.singletonList(nonOracleCheck), "testG1GCCheck"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(nonOracleCheck), "testG1GCCheck"); final BootstrapChecks.G1GCCheck nonJava8Check = new BootstrapChecks.G1GCCheck() { @@ -697,13 +707,13 @@ boolean isJava8() { }; // if not Java 8, nothing should happen - BootstrapChecks.check(true, Collections.singletonList(nonJava8Check), "testG1GCCheck"); + BootstrapChecks.check(defaultContext, true, Collections.singletonList(nonJava8Check), "testG1GCCheck"); } public void testAlwaysEnforcedChecks() { final BootstrapCheck check = new BootstrapCheck() { @Override - public boolean check() { + public boolean check(BootstrapContext context) { return true; } @@ -720,7 +730,7 @@ public boolean alwaysEnforce() { final NodeValidationException alwaysEnforced = expectThrows( NodeValidationException.class, - () -> BootstrapChecks.check(randomBoolean(), Collections.singletonList(check), "testAlwaysEnforcedChecks")); + () -> BootstrapChecks.check(defaultContext, randomBoolean(), Collections.singletonList(check), "testAlwaysEnforcedChecks")); assertThat(alwaysEnforced, hasToString(containsString("error"))); } diff --git a/core/src/test/java/org/elasticsearch/node/NodeTests.java b/core/src/test/java/org/elasticsearch/node/NodeTests.java index ec806799e71f6..edf5dfac76daa 100644 --- a/core/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/core/src/test/java/org/elasticsearch/node/NodeTests.java @@ -22,6 +22,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.cluster.ClusterName; import 
org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; @@ -66,7 +67,7 @@ public void testNodeName() throws IOException { public static class CheckPlugin extends Plugin { public static final BootstrapCheck CHECK = new BootstrapCheck() { @Override - public boolean check() { + public boolean check(BootstrapContext context) { return false; } @@ -90,7 +91,7 @@ public void testLoadPluginBootstrapChecks() throws IOException { AtomicBoolean executed = new AtomicBoolean(false); try (Node node = new MockNode(settings.build(), Arrays.asList(getTestTransportPlugin(), CheckPlugin.class)) { @Override - protected void validateNodeBeforeAcceptingRequests(Settings settings, BoundTransportAddress boundTransportAddress, + protected void validateNodeBeforeAcceptingRequests(BootstrapContext context, BoundTransportAddress boundTransportAddress, List bootstrapChecks) throws NodeValidationException { assertEquals(1, bootstrapChecks.size()); assertSame(CheckPlugin.CHECK, bootstrapChecks.get(0)); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilBootstrapChecksTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilBootstrapChecksTests.java index 8e346bf7d9cb9..8b0fbd6862fc1 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilBootstrapChecksTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilBootstrapChecksTests.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matcher; @@ -61,7 +62,7 @@ public void testEnforceBootstrapChecks() throws NodeValidationException { final List checks = Collections.singletonList( new BootstrapCheck() { @Override - public boolean check() { + public boolean check(BootstrapContext context) { return true; } @@ 
-75,7 +76,7 @@ public String errorMessage() { final NodeValidationException e = expectThrows( NodeValidationException.class, - () -> BootstrapChecks.check(false, checks, logger)); + () -> BootstrapChecks.check(new BootstrapContext(Settings.EMPTY, null), false, checks, logger)); final Matcher allOf = allOf(containsString("bootstrap checks failed"), containsString("error")); assertThat(e, hasToString(allOf)); @@ -87,7 +88,7 @@ public void testNonEnforcedBootstrapChecks() throws NodeValidationException { setEsEnforceBootstrapChecks(null); final Logger logger = mock(Logger.class); // nothing should happen - BootstrapChecks.check(false, emptyList(), logger); + BootstrapChecks.check(new BootstrapContext(Settings.EMPTY, null), false, emptyList(), logger); verifyNoMoreInteractions(logger); } @@ -97,7 +98,7 @@ public void testInvalidValue() { final boolean enforceLimits = randomBoolean(); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> BootstrapChecks.check(enforceLimits, emptyList(), "testInvalidValue")); + () -> BootstrapChecks.check(new BootstrapContext(Settings.EMPTY, null), enforceLimits, emptyList(), "testInvalidValue")); final Matcher matcher = containsString( "[es.enforce.bootstrap.checks] must be [true] but was [" + value + "]"); assertThat(e, hasToString(matcher)); From ca6bce75dab21297428c1086fb1906cf4c1953ce Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 13 Sep 2017 21:30:27 -0400 Subject: [PATCH 28/67] Refactor bootstrap check results and error messages This commit refactors the bootstrap checks into a single result object that encapsulates whether or not the check passed, and a failure message if the check failed. This simpifies the checks, and enables the messages to more easily be based on the state used to discern whether or not the check passed. 
Relates #26637 --- .../bootstrap/BootstrapCheck.java | 50 +++- .../bootstrap/BootstrapChecks.java | 271 +++++++++--------- .../bootstrap/BootstrapChecksTests.java | 37 +-- .../org/elasticsearch/node/NodeTests.java | 11 +- .../bootstrap/EvilBootstrapChecksTests.java | 13 +- 5 files changed, 186 insertions(+), 196 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java index a2620b2560c0d..78c60d694b0bb 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java @@ -19,27 +19,61 @@ package org.elasticsearch.bootstrap; +import java.util.Objects; + /** * Encapsulates a bootstrap check. */ public interface BootstrapCheck { /** - * Test if the node fails the check. - * - * @param context the bootstrap context for more sophisticated checks - * @return {@code true} if the node failed the check + * Encapsulate the result of a bootstrap check. */ - boolean check(BootstrapContext context); + final class BootstrapCheckResult { + + private final String message; + + private static final BootstrapCheckResult SUCCESS = new BootstrapCheckResult(null); + + public static BootstrapCheckResult success() { + return SUCCESS; + } + + public static BootstrapCheckResult failure(final String message) { + Objects.requireNonNull(message); + return new BootstrapCheckResult(message); + } + + private BootstrapCheckResult(final String message) { + this.message = message; + } + + public boolean isSuccess() { + return this == SUCCESS; + } + + public boolean isFailure() { + return !isSuccess(); + } + + public String getMessage() { + assert isFailure(); + assert message != null; + return message; + } + + } /** - * The error message for a failed check. + * Test if the node fails the check. 
* - * @return the error message on check failure + * @param context the bootstrap context + * @return the result of the bootstrap check */ - String errorMessage(); + BootstrapCheckResult check(BootstrapContext context); default boolean alwaysEnforce() { return false; } + } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index b13f36229f9a8..54f1528e4633b 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -137,11 +137,12 @@ static void check( } for (final BootstrapCheck check : checks) { - if (check.check(context)) { + final BootstrapCheck.BootstrapCheckResult result = check.check(context); + if (result.isFailure()) { if (!(enforceLimits || enforceBootstrapChecks) && !check.alwaysEnforce()) { - ignoredErrors.add(check.errorMessage()); + ignoredErrors.add(result.getMessage()); } else { - errors.add(check.errorMessage()); + errors.add(result.getMessage()); } } } @@ -215,21 +216,20 @@ static List checks() { static class HeapSizeCheck implements BootstrapCheck { @Override - public boolean check(BootstrapContext context) { + public BootstrapCheckResult check(BootstrapContext context) { final long initialHeapSize = getInitialHeapSize(); final long maxHeapSize = getMaxHeapSize(); - return initialHeapSize != 0 && maxHeapSize != 0 && initialHeapSize != maxHeapSize; - } - - @Override - public String errorMessage() { - return String.format( - Locale.ROOT, - "initial heap size [%d] not equal to maximum heap size [%d]; " + - "this can cause resize pauses and prevents mlockall from locking the entire heap", - getInitialHeapSize(), - getMaxHeapSize() - ); + if (initialHeapSize != 0 && maxHeapSize != 0 && initialHeapSize != maxHeapSize) { + final String message = String.format( + Locale.ROOT, + "initial heap size [%d] not equal to maximum heap size [%d]; " + + "this can cause 
resize pauses and prevents mlockall from locking the entire heap", + getInitialHeapSize(), + getMaxHeapSize()); + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } } // visible for testing @@ -271,19 +271,18 @@ protected FileDescriptorCheck(final int limit) { this.limit = limit; } - public final boolean check(BootstrapContext context) { + public final BootstrapCheckResult check(BootstrapContext context) { final long maxFileDescriptorCount = getMaxFileDescriptorCount(); - return maxFileDescriptorCount != -1 && maxFileDescriptorCount < limit; - } - - @Override - public final String errorMessage() { - return String.format( - Locale.ROOT, - "max file descriptors [%d] for elasticsearch process is too low, increase to at least [%d]", - getMaxFileDescriptorCount(), - limit - ); + if (maxFileDescriptorCount != -1 && maxFileDescriptorCount < limit) { + final String message = String.format( + Locale.ROOT, + "max file descriptors [%d] for elasticsearch process is too low, increase to at least [%d]", + getMaxFileDescriptorCount(), + limit); + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } } // visible for testing @@ -296,13 +295,12 @@ long getMaxFileDescriptorCount() { static class MlockallCheck implements BootstrapCheck { @Override - public boolean check(BootstrapContext context) { - return BootstrapSettings.MEMORY_LOCK_SETTING.get(context.settings) && !isMemoryLocked(); - } - - @Override - public String errorMessage() { - return "memory locking requested for elasticsearch process but memory is not locked"; + public BootstrapCheckResult check(BootstrapContext context) { + if (BootstrapSettings.MEMORY_LOCK_SETTING.get(context.settings) && !isMemoryLocked()) { + return BootstrapCheckResult.failure("memory locking requested for elasticsearch process but memory is not locked"); + } else { + return BootstrapCheckResult.success(); + } } // visible for testing @@ -318,18 
+316,18 @@ static class MaxNumberOfThreadsCheck implements BootstrapCheck { private static final long MAX_NUMBER_OF_THREADS_THRESHOLD = 1 << 12; @Override - public boolean check(BootstrapContext context) { - return getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < MAX_NUMBER_OF_THREADS_THRESHOLD; - } - - @Override - public String errorMessage() { - return String.format( - Locale.ROOT, - "max number of threads [%d] for user [%s] is too low, increase to at least [%d]", - getMaxNumberOfThreads(), - BootstrapInfo.getSystemProperties().get("user.name"), - MAX_NUMBER_OF_THREADS_THRESHOLD); + public BootstrapCheckResult check(BootstrapContext context) { + if (getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < MAX_NUMBER_OF_THREADS_THRESHOLD) { + final String message = String.format( + Locale.ROOT, + "max number of threads [%d] for user [%s] is too low, increase to at least [%d]", + getMaxNumberOfThreads(), + BootstrapInfo.getSystemProperties().get("user.name"), + MAX_NUMBER_OF_THREADS_THRESHOLD); + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } } // visible for testing @@ -342,17 +340,17 @@ long getMaxNumberOfThreads() { static class MaxSizeVirtualMemoryCheck implements BootstrapCheck { @Override - public boolean check(BootstrapContext context) { - return getMaxSizeVirtualMemory() != Long.MIN_VALUE && getMaxSizeVirtualMemory() != getRlimInfinity(); - } - - @Override - public String errorMessage() { - return String.format( - Locale.ROOT, - "max size virtual memory [%d] for user [%s] is too low, increase to [unlimited]", - getMaxSizeVirtualMemory(), - BootstrapInfo.getSystemProperties().get("user.name")); + public BootstrapCheckResult check(BootstrapContext context) { + if (getMaxSizeVirtualMemory() != Long.MIN_VALUE && getMaxSizeVirtualMemory() != getRlimInfinity()) { + final String message = String.format( + Locale.ROOT, + "max size virtual memory [%d] for user [%s] is too low, increase to 
[unlimited]", + getMaxSizeVirtualMemory(), + BootstrapInfo.getSystemProperties().get("user.name")); + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } } // visible for testing @@ -373,18 +371,18 @@ long getMaxSizeVirtualMemory() { static class MaxFileSizeCheck implements BootstrapCheck { @Override - public boolean check(BootstrapContext context) { + public BootstrapCheckResult check(BootstrapContext context) { final long maxFileSize = getMaxFileSize(); - return maxFileSize != Long.MIN_VALUE && maxFileSize != getRlimInfinity(); - } - - @Override - public String errorMessage() { - return String.format( - Locale.ROOT, - "max file size [%d] for user [%s] is too low, increase to [unlimited]", - getMaxFileSize(), - BootstrapInfo.getSystemProperties().get("user.name")); + if (maxFileSize != Long.MIN_VALUE && maxFileSize != getRlimInfinity()) { + final String message = String.format( + Locale.ROOT, + "max file size [%d] for user [%s] is too low, increase to [unlimited]", + getMaxFileSize(), + BootstrapInfo.getSystemProperties().get("user.name")); + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } } long getRlimInfinity() { @@ -402,17 +400,17 @@ static class MaxMapCountCheck implements BootstrapCheck { private static final long LIMIT = 1 << 18; @Override - public boolean check(BootstrapContext context) { - return getMaxMapCount() != -1 && getMaxMapCount() < LIMIT; - } - - @Override - public String errorMessage() { - return String.format( - Locale.ROOT, - "max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]", - getMaxMapCount(), - LIMIT); + public BootstrapCheckResult check(BootstrapContext context) { + if (getMaxMapCount() != -1 && getMaxMapCount() < LIMIT) { + final String message = String.format( + Locale.ROOT, + "max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]", + getMaxMapCount(), + LIMIT); + 
return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } } // visible for testing @@ -467,8 +465,16 @@ long parseProcSysVmMaxMapCount(final String procSysVmMaxMapCount) throws NumberF static class ClientJvmCheck implements BootstrapCheck { @Override - public boolean check(BootstrapContext context) { - return getVmName().toLowerCase(Locale.ROOT).contains("client"); + public BootstrapCheckResult check(BootstrapContext context) { + if (getVmName().toLowerCase(Locale.ROOT).contains("client")) { + final String message = String.format( + Locale.ROOT, + "JVM is using the client VM [%s] but should be using a server VM for the best performance", + getVmName()); + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } } // visible for testing @@ -476,14 +482,6 @@ String getVmName() { return JvmInfo.jvmInfo().getVmName(); } - @Override - public String errorMessage() { - return String.format( - Locale.ROOT, - "JVM is using the client VM [%s] but should be using a server VM for the best performance", - getVmName()); - } - } /** @@ -493,8 +491,17 @@ public String errorMessage() { static class UseSerialGCCheck implements BootstrapCheck { @Override - public boolean check(BootstrapContext context) { - return getUseSerialGC().equals("true"); + public BootstrapCheckResult check(BootstrapContext context) { + if (getUseSerialGC().equals("true")) { + final String message = String.format( + Locale.ROOT, + "JVM is using the serial collector but should not be for the best performance; " + + "either it's the default for the VM [%s] or -XX:+UseSerialGC was explicitly specified", + JvmInfo.jvmInfo().getVmName()); + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } } // visible for testing @@ -502,15 +509,6 @@ String getUseSerialGC() { return JvmInfo.jvmInfo().useSerialGC(); } - @Override - public String errorMessage() { - return String.format( - 
Locale.ROOT, - "JVM is using the serial collector but should not be for the best performance; " + - "either it's the default for the VM [%s] or -XX:+UseSerialGC was explicitly specified", - JvmInfo.jvmInfo().getVmName()); - } - } /** @@ -519,8 +517,14 @@ public String errorMessage() { static class SystemCallFilterCheck implements BootstrapCheck { @Override - public boolean check(BootstrapContext context) { - return BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(context.settings) && !isSystemCallFilterInstalled(); + public BootstrapCheckResult check(BootstrapContext context) { + if (BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(context.settings) && !isSystemCallFilterInstalled()) { + final String message = "system call filters failed to install; " + + "check the logs and fix your configuration or disable system call filters at your own risk"; + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } } // visible for testing @@ -528,21 +532,21 @@ boolean isSystemCallFilterInstalled() { return Natives.isSystemCallFilterInstalled(); } - @Override - public String errorMessage() { - return "system call filters failed to install; " + - "check the logs and fix your configuration or disable system call filters at your own risk"; - } - } abstract static class MightForkCheck implements BootstrapCheck { @Override - public boolean check(BootstrapContext context) { - return isSystemCallFilterInstalled() && mightFork(); + public BootstrapCheckResult check(BootstrapContext context) { + if (isSystemCallFilterInstalled() && mightFork()) { + return BootstrapCheckResult.failure(message(context)); + } else { + return BootstrapCheckResult.success(); + } } + abstract String message(BootstrapContext context); + // visible for testing boolean isSystemCallFilterInstalled() { return Natives.isSystemCallFilterInstalled(); @@ -572,7 +576,7 @@ String onError() { } @Override - public String errorMessage() { + String message(BootstrapContext 
context) { return String.format( Locale.ROOT, "OnError [%s] requires forking but is prevented by system call filters ([%s=true]);" + @@ -596,8 +600,7 @@ String onOutOfMemoryError() { return JvmInfo.jvmInfo().onOutOfMemoryError(); } - @Override - public String errorMessage() { + String message(BootstrapContext context) { return String.format( Locale.ROOT, "OnOutOfMemoryError [%s] requires forking but is prevented by system call filters ([%s=true]);" + @@ -614,8 +617,17 @@ public String errorMessage() { static class EarlyAccessCheck implements BootstrapCheck { @Override - public boolean check(BootstrapContext context) { - return "Oracle Corporation".equals(jvmVendor()) && javaVersion().endsWith("-ea"); + public BootstrapCheckResult check(BootstrapContext context) { + final String javaVersion = javaVersion(); + if ("Oracle Corporation".equals(jvmVendor()) && javaVersion.endsWith("-ea")) { + final String message = String.format( + Locale.ROOT, + "Java version [%s] is an early-access build, only use release builds", + javaVersion); + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } } String jvmVendor() { @@ -626,14 +638,6 @@ String javaVersion() { return Constants.JAVA_VERSION; } - @Override - public String errorMessage() { - return String.format( - Locale.ROOT, - "Java version [%s] is an early-access build, only use release builds", - javaVersion()); - } - } /** @@ -642,7 +646,7 @@ public String errorMessage() { static class G1GCCheck implements BootstrapCheck { @Override - public boolean check(BootstrapContext context) { + public BootstrapCheckResult check(BootstrapContext context) { if ("Oracle Corporation".equals(jvmVendor()) && isJava8() && isG1GCEnabled()) { final String jvmVersion = jvmVersion(); // HotSpot versions on Java 8 match this regular expression; note that this changes with Java 9 after JEP-223 @@ -653,10 +657,14 @@ public boolean check(BootstrapContext context) { final int major = 
Integer.parseInt(matcher.group(1)); final int update = Integer.parseInt(matcher.group(2)); // HotSpot versions for Java 8 have major version 25, the bad versions are all versions prior to update 40 - return major == 25 && update < 40; - } else { - return false; + if (major == 25 && update < 40) { + final String message = String.format( + Locale.ROOT, + "JVM version [%s] can cause data corruption when used with G1GC; upgrade to at least Java 8u40", jvmVersion); + return BootstrapCheckResult.failure(message); + } } + return BootstrapCheckResult.success(); } // visible for testing @@ -682,13 +690,6 @@ boolean isJava8() { return JavaVersion.current().equals(JavaVersion.parse("1.8")); } - @Override - public String errorMessage() { - return String.format( - Locale.ROOT, - "JVM version [%s] can cause data corruption when used with G1GC; upgrade to at least Java 8u40", jvmVersion()); - } - } } diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java index a02590c30e0c5..a70d96a302c84 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java @@ -52,6 +52,7 @@ import static org.mockito.Mockito.when; public class BootstrapChecksTests extends ESTestCase { + private static final BootstrapContext defaultContext = new BootstrapContext(Settings.EMPTY, MetaData.EMPTY_META_DATA); public void testNonProductionMode() throws NodeValidationException { @@ -126,29 +127,8 @@ public void testEnforceLimitsWhenPublishingToNonLocalAddress() { public void testExceptionAggregation() { final List checks = Arrays.asList( - new BootstrapCheck() { - @Override - public boolean check(BootstrapContext context) { - return true; - } - - @Override - public String errorMessage() { - return "first"; - } - }, - new BootstrapCheck() { - @Override - public boolean check(BootstrapContext context) { - 
return true; - } - - @Override - public String errorMessage() { - return "second"; - } - } - ); + context -> BootstrapCheck.BootstrapCheckResult.failure("first"), + context -> BootstrapCheck.BootstrapCheckResult.failure("second")); final NodeValidationException e = expectThrows(NodeValidationException.class, @@ -497,7 +477,7 @@ boolean mightFork() { } @Override - public String errorMessage() { + String message(BootstrapContext context) { return "error"; } }; @@ -713,13 +693,8 @@ boolean isJava8() { public void testAlwaysEnforcedChecks() { final BootstrapCheck check = new BootstrapCheck() { @Override - public boolean check(BootstrapContext context) { - return true; - } - - @Override - public String errorMessage() { - return "error"; + public BootstrapCheckResult check(BootstrapContext context) { + return BootstrapCheckResult.failure("error"); } @Override diff --git a/core/src/test/java/org/elasticsearch/node/NodeTests.java b/core/src/test/java/org/elasticsearch/node/NodeTests.java index edf5dfac76daa..5f69e02db34f2 100644 --- a/core/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/core/src/test/java/org/elasticsearch/node/NodeTests.java @@ -65,17 +65,8 @@ public void testNodeName() throws IOException { } public static class CheckPlugin extends Plugin { - public static final BootstrapCheck CHECK = new BootstrapCheck() { - @Override - public boolean check(BootstrapContext context) { - return false; - } + public static final BootstrapCheck CHECK = context -> BootstrapCheck.BootstrapCheckResult.success(); - @Override - public String errorMessage() { - return "boom"; - } - }; @Override public List getBootstrapChecks() { return Collections.singletonList(CHECK); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilBootstrapChecksTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilBootstrapChecksTests.java index 8b0fbd6862fc1..0dc9ea0a170ba 100644 --- 
a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilBootstrapChecksTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilBootstrapChecksTests.java @@ -59,19 +59,8 @@ public void tearDown() throws Exception { public void testEnforceBootstrapChecks() throws NodeValidationException { setEsEnforceBootstrapChecks("true"); - final List checks = Collections.singletonList( - new BootstrapCheck() { - @Override - public boolean check(BootstrapContext context) { - return true; - } + final List checks = Collections.singletonList(context -> BootstrapCheck.BootstrapCheckResult.failure("error")); - @Override - public String errorMessage() { - return "error"; - } - } - ); final Logger logger = mock(Logger.class); final NodeValidationException e = expectThrows( From 401f4ba2cea751b5b589009aef65ac8a8d47a567 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 14 Sep 2017 09:31:14 +0200 Subject: [PATCH 29/67] Fix percolator highlight sub fetch phase to not highlight query twice (#26622) * Fix percolator highlight sub fetch phase to not highlight query twice The PercolatorHighlightSubFetchPhase does not override hitExecute and since it extends HighlightPhase the search hits are highlighted twice (by the highlight phase and then by the percolator). This does not alter the results, the second highlighting just overrides the first one but this slow down the request because it duplicates the work. 
--- docs/reference/search/request/highlighting.asciidoc | 2 +- .../percolator/PercolatorHighlightSubFetchPhase.java | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index 381bf472dd25b..066df3e6fa053 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -909,7 +909,7 @@ Response: }, "highlight": { "message": [ - "some message with the number 1" + " with the number 1" ] } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java index a0f3c006290d0..44823f9aa012b 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java @@ -53,13 +53,13 @@ * Highlighting in the case of the percolate query is a bit different, because the PercolateQuery itself doesn't get highlighted, * but the source of the PercolateQuery gets highlighted by each hit containing a query. 
*/ -final class PercolatorHighlightSubFetchPhase extends HighlightPhase { +final class PercolatorHighlightSubFetchPhase implements FetchSubPhase { + private final HighlightPhase highlightPhase; PercolatorHighlightSubFetchPhase(Settings settings, Map highlighters) { - super(settings, highlighters); + this.highlightPhase = new HighlightPhase(settings, highlighters); } - boolean hitsExecutionNeeded(SearchContext context) { // for testing return context.highlight() != null && locatePercolatorQuery(context.query()).isEmpty() == false; } @@ -109,7 +109,7 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept percolatorLeafReaderContext, slot, percolatorIndexSearcher ); hitContext.cache().clear(); - super.hitExecute(subSearchContext, hitContext); + highlightPhase.hitExecute(subSearchContext, hitContext); for (Map.Entry entry : hitContext.hit().getHighlightFields().entrySet()) { if (percolateQuery.getDocuments().size() == 1) { String hlFieldName; From 914416e9f4810c448312c719397cca68bd53a647 Mon Sep 17 00:00:00 2001 From: "Daniel A. Ochoa" Date: Thu, 14 Sep 2017 01:10:34 -0700 Subject: [PATCH 30/67] [Docs] Update link in removal_of_types.asciidoc (#26614) Fix link to [parent-child relationship]. --- docs/reference/mapping/removal_of_types.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc index d51fba66dd10a..e47d130aa3ac6 100644 --- a/docs/reference/mapping/removal_of_types.asciidoc +++ b/docs/reference/mapping/removal_of_types.asciidoc @@ -41,7 +41,7 @@ field, so documents of different types with the same `_id` could exist in a single index. Mapping types were also used to establish a -/guide/en/elasticsearch/reference/5.4/mapping-parent-field.html[parent-child relationship] +<> between documents, so documents of type `question` could be parents to documents of type `answer`. 
From b6c6effa2a0f48b76e235ec8a95099fcc6915b7d Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 14 Sep 2017 11:56:02 +0200 Subject: [PATCH 31/67] Move all repository-azure classes under one single package (#26624) As we did for S3, we can collapse all packages within one single `org.elasticsearch.repositories.azure` package name. Follow up for https://github.com/elastic/elasticsearch/pull/23518#issuecomment-328903585 --- .../resources/checkstyle_suppressions.xml | 5 --- plugins/repository-azure/build.gradle | 2 +- .../azure}/AzureBlobContainer.java | 5 ++- .../azure}/AzureBlobStore.java | 7 ++-- .../azure}/AzureOutputStream.java | 2 +- .../repositories/azure/AzureRepository.java | 6 +-- .../azure/AzureRepositoryPlugin.java | 6 +-- .../azure/AzureServiceDisableException.java | 2 +- .../azure/AzureServiceRemoteException.java | 2 +- .../azure}/AzureStorageService.java | 5 +-- .../azure}/AzureStorageServiceImpl.java | 21 +++++++---- .../azure}/AzureStorageSettings.java | 2 +- .../azure}/SocketAccess.java | 2 +- .../azure/AbstractAzureIntegTestCase.java | 4 +- ...tractAzureWithThirdPartyIntegTestCase.java | 5 +-- .../azure/AzureBlobStoreContainerTests.java | 2 - .../repositories/azure/AzureRepositoryF.java | 1 - .../azure/AzureRepositorySettingsTests.java | 1 - ...zureSnapshotRestoreListSnapshotsTests.java | 7 +--- .../azure/AzureSnapshotRestoreTests.java | 7 +--- .../azure}/AzureStorageServiceMock.java | 9 +++-- .../azure}/AzureStorageServiceTests.java | 37 +++++++++---------- .../azure/AzureTestUtils.java | 2 +- 23 files changed, 61 insertions(+), 81 deletions(-) rename plugins/repository-azure/src/main/java/org/elasticsearch/{cloud/azure/blobstore => repositories/azure}/AzureBlobContainer.java (98%) rename plugins/repository-azure/src/main/java/org/elasticsearch/{cloud/azure/blobstore => repositories/azure}/AzureBlobStore.java (96%) rename plugins/repository-azure/src/main/java/org/elasticsearch/{cloud/azure/blobstore => 
repositories/azure}/AzureOutputStream.java (96%) rename plugins/repository-azure/src/main/java/org/elasticsearch/{plugin/repository => repositories}/azure/AzureRepositoryPlugin.java (89%) rename plugins/repository-azure/src/main/java/org/elasticsearch/{cloud => repositories}/azure/AzureServiceDisableException.java (95%) rename plugins/repository-azure/src/main/java/org/elasticsearch/{cloud => repositories}/azure/AzureServiceRemoteException.java (95%) rename plugins/repository-azure/src/main/java/org/elasticsearch/{cloud/azure/storage => repositories/azure}/AzureStorageService.java (95%) rename plugins/repository-azure/src/main/java/org/elasticsearch/{cloud/azure/storage => repositories/azure}/AzureStorageServiceImpl.java (95%) rename plugins/repository-azure/src/main/java/org/elasticsearch/{cloud/azure/storage => repositories/azure}/AzureStorageSettings.java (99%) rename plugins/repository-azure/src/main/java/org/elasticsearch/{cloud/azure/blobstore/util => repositories/azure}/SocketAccess.java (98%) rename plugins/repository-azure/src/test/java/org/elasticsearch/{cloud => repositories}/azure/AbstractAzureIntegTestCase.java (91%) rename plugins/repository-azure/src/test/java/org/elasticsearch/{cloud => repositories}/azure/AbstractAzureWithThirdPartyIntegTestCase.java (89%) rename plugins/repository-azure/src/test/java/org/elasticsearch/{cloud/azure/storage => repositories/azure}/AzureStorageServiceMock.java (94%) rename plugins/repository-azure/src/test/java/org/elasticsearch/{cloud/azure/storage => repositories/azure}/AzureStorageServiceTests.java (91%) rename plugins/repository-azure/src/test/java/org/elasticsearch/{cloud => repositories}/azure/AzureTestUtils.java (98%) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index f3ac73690a95b..54dfe661f35a9 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ 
-739,11 +739,6 @@ - - - - - diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 632fa56e1e9da..bb5e1e757812f 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -19,7 +19,7 @@ esplugin { description 'The Azure Repository plugin adds support for Azure storage repositories.' - classname 'org.elasticsearch.plugin.repository.azure.AzureRepositoryPlugin' + classname 'org.elasticsearch.repositories.azure.AzureRepositoryPlugin' } dependencies { diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java similarity index 98% rename from plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java rename to plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index b1b359956b6da..8e86d1e464d44 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure.blobstore; +package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; @@ -156,7 +156,8 @@ public void move(String sourceBlobName, String targetBlobName) throws IOExceptio blobStore.moveBlob(blobStore.container(), source, target); } catch (URISyntaxException | StorageException e) { - logger.warn("can not move blob [{}] to [{}] in container {{}}: {}", sourceBlobName, targetBlobName, blobStore.container(), e.getMessage()); + logger.warn("can not move blob [{}] to [{}] in container {{}}: {}", sourceBlobName, targetBlobName, blobStore.container(), + e.getMessage()); throw new IOException(e); } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java similarity index 96% rename from plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java rename to plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 868b661b50899..659abcd3fd240 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -17,11 +17,10 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure.blobstore; +package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; -import org.elasticsearch.cloud.azure.storage.AzureStorageService; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; @@ -130,8 +129,8 @@ public OutputStream getOutputStream(String container, String blob) throws URISyn return this.client.getOutputStream(this.clientName, this.locMode, container, blob); } - public Map listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException - { + public Map listBlobsByPrefix(String container, String keyPath, String prefix) + throws URISyntaxException, StorageException { return this.client.listBlobsByPrefix(this.clientName, this.locMode, container, keyPath, prefix); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureOutputStream.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureOutputStream.java similarity index 96% rename from plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureOutputStream.java rename to plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureOutputStream.java index 6a95eeba7789c..1f54f40fb6bc2 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureOutputStream.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureOutputStream.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure.blobstore; +package org.elasticsearch.repositories.azure; import java.io.IOException; import java.io.OutputStream; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 2a02ec5f8d8c8..6da0028338250 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -21,8 +21,6 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; -import org.elasticsearch.cloud.azure.blobstore.AzureBlobStore; -import org.elasticsearch.cloud.azure.storage.AzureStorageService; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; @@ -43,8 +41,8 @@ import java.util.Locale; import java.util.function.Function; -import static org.elasticsearch.cloud.azure.storage.AzureStorageService.MAX_CHUNK_SIZE; -import static org.elasticsearch.cloud.azure.storage.AzureStorageService.MIN_CHUNK_SIZE; +import static org.elasticsearch.repositories.azure.AzureStorageService.MAX_CHUNK_SIZE; +import static org.elasticsearch.repositories.azure.AzureStorageService.MIN_CHUNK_SIZE; /** * Azure file system implementation of the BlobStoreRepository diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java similarity index 89% rename from plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java rename to plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java 
index ed2f6be776dca..3fd09962aedcb 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -17,11 +17,8 @@ * under the License. */ -package org.elasticsearch.plugin.repository.azure; +package org.elasticsearch.repositories.azure; -import org.elasticsearch.cloud.azure.storage.AzureStorageService; -import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; -import org.elasticsearch.cloud.azure.storage.AzureStorageSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -29,7 +26,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.Repository; -import org.elasticsearch.repositories.azure.AzureRepository; import java.util.Arrays; import java.util.Collections; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceDisableException.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceDisableException.java similarity index 95% rename from plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceDisableException.java rename to plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceDisableException.java index 487997d71b63f..a100079668b54 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceDisableException.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceDisableException.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.repositories.azure; public class AzureServiceDisableException extends IllegalStateException { public AzureServiceDisableException(String msg) { diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceRemoteException.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceRemoteException.java similarity index 95% rename from plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceRemoteException.java rename to plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceRemoteException.java index 4bd4f1d67f197..3f20e29505751 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceRemoteException.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureServiceRemoteException.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.repositories.azure; public class AzureServiceRemoteException extends IllegalStateException { public AzureServiceRemoteException(String msg) { diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java similarity index 95% rename from plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java rename to plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index bebfc03c1b7a4..26e77ac5e41a9 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -17,21 +17,18 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure.storage; +package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; import org.elasticsearch.common.blobstore.BlobMetaData; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.net.Proxy; import java.net.URISyntaxException; -import java.util.Locale; import java.util.Map; /** diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java similarity index 95% rename from plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java rename to plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java index bb77390868507..03a590867a7ed 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure.storage; +package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.CloudStorageAccount; import com.microsoft.azure.storage.LocationMode; @@ -34,7 +34,9 @@ import com.microsoft.azure.storage.blob.ListBlobItem; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.cloud.azure.blobstore.util.SocketAccess; +import org.elasticsearch.repositories.azure.AzureStorageService; +import org.elasticsearch.repositories.azure.AzureStorageSettings; +import org.elasticsearch.repositories.azure.SocketAccess; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; @@ -218,7 +220,8 @@ public static String blobNameFromUri(URI uri) { } @Override - public boolean blobExists(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException { + public boolean blobExists(String account, LocationMode mode, String container, String blob) + throws URISyntaxException, StorageException { // Container name must be lower case. 
CloudBlobClient client = this.getSelectedClient(account, mode); CloudBlobContainer blobContainer = client.getContainerReference(container); @@ -246,7 +249,8 @@ public void deleteBlob(String account, LocationMode mode, String container, Stri } @Override - public InputStream getInputStream(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException { + public InputStream getInputStream(String account, LocationMode mode, String container, String blob) + throws URISyntaxException, StorageException { logger.trace("reading container [{}], blob [{}]", container, blob); CloudBlobClient client = this.getSelectedClient(account, mode); CloudBlockBlob blockBlobReference = client.getContainerReference(container).getBlockBlobReference(blob); @@ -254,7 +258,8 @@ public InputStream getInputStream(String account, LocationMode mode, String cont } @Override - public OutputStream getOutputStream(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException { + public OutputStream getOutputStream(String account, LocationMode mode, String container, String blob) + throws URISyntaxException, StorageException { logger.trace("writing container [{}], blob [{}]", container, blob); CloudBlobClient client = this.getSelectedClient(account, mode); CloudBlockBlob blockBlobReference = client.getContainerReference(container).getBlockBlobReference(blob); @@ -262,7 +267,8 @@ public OutputStream getOutputStream(String account, LocationMode mode, String co } @Override - public Map listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) throws URISyntaxException, StorageException { + public Map listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) + throws URISyntaxException, StorageException { // NOTE: this should be here: if (prefix == null) prefix = ""; // however, this is really inefficient since 
deleteBlobsByPrefix enumerates everything and // then does a prefix match on the result; it should just call listBlobsByPrefix with the prefix! @@ -293,7 +299,8 @@ enumBlobListingDetails, null, generateOperationContext(account))) { } @Override - public void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) throws URISyntaxException, StorageException { + public void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) + throws URISyntaxException, StorageException { logger.debug("moveBlob container [{}], sourceBlob [{}], targetBlob [{}]", container, sourceBlob, targetBlob); CloudBlobClient client = this.getSelectedClient(account, mode); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java similarity index 99% rename from plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java rename to plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index 19473b4810ab7..aea97dddfd8bd 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure.storage; +package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.RetryPolicy; import org.elasticsearch.common.Strings; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/util/SocketAccess.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java similarity index 98% rename from plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/util/SocketAccess.java rename to plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java index 6202a0a46f8e6..952fd1e4dfa72 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/util/SocketAccess.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.cloud.azure.blobstore.util; +package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.StorageException; import org.elasticsearch.SpecialPermission; diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureIntegTestCase.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureIntegTestCase.java similarity index 91% rename from plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureIntegTestCase.java rename to plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureIntegTestCase.java index 82c5e6c188b9a..04718aadfef51 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureIntegTestCase.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureIntegTestCase.java @@ -17,9 +17,9 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.repositories.azure; -import org.elasticsearch.plugin.repository.azure.AzureRepositoryPlugin; +import org.elasticsearch.repositories.azure.AzureRepositoryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureWithThirdPartyIntegTestCase.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureWithThirdPartyIntegTestCase.java similarity index 89% rename from plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureWithThirdPartyIntegTestCase.java rename to plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureWithThirdPartyIntegTestCase.java index 8f6cdce113e7a..96f4064c633a8 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureWithThirdPartyIntegTestCase.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureWithThirdPartyIntegTestCase.java @@ -17,17 +17,16 @@ * under the License. */ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.repositories.azure; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugin.repository.azure.AzureRepositoryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase.ThirdParty; import java.util.Arrays; import java.util.Collection; -import static org.elasticsearch.cloud.azure.AzureTestUtils.readSettingsFromFile; +import static org.elasticsearch.repositories.azure.AzureTestUtils.readSettingsFromFile; /** * Base class for Azure tests that require credentials. 
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java index 85ca44205aa94..10deeb4676fd3 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java @@ -20,8 +20,6 @@ package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.StorageException; -import org.elasticsearch.cloud.azure.blobstore.AzureBlobStore; -import org.elasticsearch.cloud.azure.storage.AzureStorageServiceMock; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositoryF.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositoryF.java index bfa0621912724..bb93792aa6e61 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositoryF.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositoryF.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; -import org.elasticsearch.plugin.repository.azure.AzureRepositoryPlugin; import java.io.IOException; import java.util.Collections; diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index 75ef13d7d8745..a2afbccf27a53 100644 --- 
a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -21,7 +21,6 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; -import org.elasticsearch.cloud.azure.storage.AzureStorageService; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreListSnapshotsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreListSnapshotsTests.java index 989b9541d79cc..2ee43e7f6a71f 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreListSnapshotsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreListSnapshotsTests.java @@ -23,10 +23,6 @@ import com.microsoft.azure.storage.StorageException; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.cloud.azure.AbstractAzureWithThirdPartyIntegTestCase; -import org.elasticsearch.cloud.azure.storage.AzureStorageService; -import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; -import org.elasticsearch.cloud.azure.storage.AzureStorageSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.azure.AzureRepository.Repository; import org.elasticsearch.test.ESIntegTestCase; @@ -35,10 +31,9 @@ import org.junit.Before; import java.net.URISyntaxException; -import java.net.UnknownHostException; import java.util.concurrent.TimeUnit; -import static 
org.elasticsearch.cloud.azure.AzureTestUtils.readSettingsFromFile; +import static org.elasticsearch.repositories.azure.AzureTestUtils.readSettingsFromFile; import static org.elasticsearch.repositories.azure.AzureSnapshotRestoreTests.getContainerName; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java index 4ad3608d44354..c8ff86ce58287 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java @@ -28,15 +28,10 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.client.Client; import org.elasticsearch.client.ClusterAdminClient; -import org.elasticsearch.cloud.azure.AbstractAzureWithThirdPartyIntegTestCase; -import org.elasticsearch.cloud.azure.storage.AzureStorageService; -import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; -import org.elasticsearch.cloud.azure.storage.AzureStorageSettings; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.plugin.repository.azure.AzureRepositoryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.RepositoryVerificationException; @@ -58,7 +53,7 @@ import java.util.Locale; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.cloud.azure.AzureTestUtils.readSettingsFromFile; +import static 
org.elasticsearch.repositories.azure.AzureTestUtils.readSettingsFromFile; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java similarity index 94% rename from plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java rename to plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index ba2011c276e0b..8943edf500195 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.cloud.azure.storage; +package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; @@ -26,6 +26,7 @@ import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.azure.AzureStorageService; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -85,7 +86,8 @@ public InputStream getInputStream(String account, LocationMode mode, String cont } @Override - public OutputStream getOutputStream(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException { + public OutputStream getOutputStream(String account, LocationMode mode, String container, String blob) + throws URISyntaxException, StorageException { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); blobs.put(blob, outputStream); return 
outputStream; @@ -110,7 +112,8 @@ public Map listBlobsByPrefix(String account, LocationMode } @Override - public void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) throws URISyntaxException, StorageException { + public void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) + throws URISyntaxException, StorageException { for (String blobName : blobs.keySet()) { if (endsWithIgnoreCase(blobName, sourceBlob)) { ByteArrayOutputStream outputStream = blobs.get(blobName); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java similarity index 91% rename from plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java rename to plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java index 6c05b7cdec3ad..76a99fc174c58 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java @@ -17,10 +17,9 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure.storage; +package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.LocationMode; -import com.microsoft.azure.storage.OperationContext; import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.blob.CloudBlobClient; import org.elasticsearch.common.settings.MockSecureSettings; @@ -36,7 +35,7 @@ import java.net.UnknownHostException; import java.util.Map; -import static org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl.blobNameFromUri; +import static org.elasticsearch.repositories.azure.AzureStorageServiceImpl.blobNameFromUri; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -78,7 +77,7 @@ public void testReadSecuredSettings() { public void testGetSelectedClientWithNoPrimaryAndSecondary() { try { - new AzureStorageServiceMock(Settings.EMPTY); + new AzureStorageServiceMockForSettings(Settings.EMPTY); fail("we should have raised an IllegalArgumentException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), is("If you want to use an azure repository, you need to define a client configuration.")); @@ -86,7 +85,7 @@ public void testGetSelectedClientWithNoPrimaryAndSecondary() { } public void testGetSelectedClientNonExisting() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(buildSettings()); + AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(buildSettings()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { azureStorageService.getSelectedClient("azure4", LocationMode.PRIMARY_ONLY); }); @@ -98,7 +97,7 @@ public void testGetSelectedClientDefaultTimeout() { .setSecureSettings(buildSecureSettings()) .put("azure.client.azure3.timeout", "30s") .build(); - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(timeoutSettings); + 
AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(timeoutSettings); CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), nullValue()); CloudBlobClient client3 = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY); @@ -106,13 +105,13 @@ public void testGetSelectedClientDefaultTimeout() { } public void testGetSelectedClientNoTimeout() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(buildSettings()); + AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(buildSettings()); CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(nullValue())); } public void testGetSelectedClientBackoffPolicy() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(buildSettings()); + AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(buildSettings()); CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); @@ -124,7 +123,7 @@ public void testGetSelectedClientBackoffPolicyNbRetries() { .put("azure.client.azure1.max_retries", 7) .build(); - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(timeoutSettings); + AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(timeoutSettings); CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); 
assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); @@ -134,7 +133,7 @@ public void testNoProxy() { Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .build(); - AzureStorageServiceMock mock = new AzureStorageServiceMock(settings); + AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); assertThat(mock.storageSettings.get("azure1").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); @@ -147,7 +146,7 @@ public void testProxyHttp() throws UnknownHostException { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "http") .build(); - AzureStorageServiceMock mock = new AzureStorageServiceMock(settings); + AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); @@ -167,7 +166,7 @@ public void testMultipleProxies() throws UnknownHostException { .put("azure.client.azure2.proxy.port", 8081) .put("azure.client.azure2.proxy.type", "http") .build(); - AzureStorageServiceMock mock = new AzureStorageServiceMock(settings); + AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); @@ -186,7 +185,7 @@ public void testProxySocks() throws UnknownHostException { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "socks") .build(); - AzureStorageServiceMock mock = new AzureStorageServiceMock(settings); + AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); Proxy azure1Proxy = 
mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.SOCKS)); @@ -202,7 +201,7 @@ public void testProxyNoHost() { .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMock(settings)); + SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } @@ -213,7 +212,7 @@ public void testProxyNoPort() { .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMock(settings)); + SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } @@ -224,7 +223,7 @@ public void testProxyNoType() { .put("azure.client.azure1.proxy.port", 8080) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMock(settings)); + SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); assertEquals("Azure Proxy port or host have been set but proxy type is not defined.", e.getMessage()); } @@ -236,15 +235,15 @@ public void testProxyWrongHost() { .put("azure.client.azure1.proxy.port", 8080) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMock(settings)); + SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); assertEquals("Azure proxy host is unknown.", e.getMessage()); } /** * This internal class just overload createClient method which is called by 
AzureStorageServiceImpl.doStart() */ - class AzureStorageServiceMock extends AzureStorageServiceImpl { - AzureStorageServiceMock(Settings settings) { + class AzureStorageServiceMockForSettings extends AzureStorageServiceImpl { + AzureStorageServiceMockForSettings(Settings settings) { super(settings, AzureStorageSettings.load(settings)); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AzureTestUtils.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureTestUtils.java similarity index 98% rename from plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AzureTestUtils.java rename to plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureTestUtils.java index 097f519db0363..271fea1722f46 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AzureTestUtils.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureTestUtils.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.repositories.azure; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; From c7c6443b1097273c997e839f82132ddc3d75c455 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 14 Sep 2017 15:08:20 +0200 Subject: [PATCH 32/67] [Docs] "The the" is a great band, but ... (#26644) Removing several occurrences of this typo in the docs and javadocs, seems to be a common mistake. Corrections turn up once in a while in PRs, better to correct some of this in one sweep. 
--- .../java/org/elasticsearch/action/DocWriteResponse.java | 2 +- .../admin/cluster/node/tasks/list/ListTasksResponse.java | 2 +- .../org/elasticsearch/action/support/AutoCreateIndex.java | 2 +- .../action/support/replication/ReplicationOperation.java | 2 +- .../org/elasticsearch/cluster/node/DiscoveryNode.java | 2 +- .../cluster/routing/IndexShardRoutingTable.java | 4 +--- .../common/xcontent/XContentParserUtils.java | 2 +- .../java/org/elasticsearch/index/engine/VersionValue.java | 2 +- .../java/org/elasticsearch/index/seqno/SeqNoStats.java | 3 +-- .../java/org/elasticsearch/index/shard/IndexShard.java | 4 +++- .../index/shard/IndexShardOperationPermits.java | 2 +- .../main/java/org/elasticsearch/script/ScriptService.java | 2 +- .../org/elasticsearch/transport/TransportService.java | 2 +- .../elasticsearch/index/engine/InternalEngineTests.java | 3 ++- .../index/seqno/GlobalCheckpointTrackerTests.java | 2 +- .../elasticsearch/search/sort/FieldSortBuilderTests.java | 4 ++-- .../search/sort/GeoDistanceSortBuilderTests.java | 4 ++-- .../elasticsearch/search/sort/ScriptSortBuilderTests.java | 8 ++++---- docs/painless/painless-operators.asciidoc | 2 +- docs/reference/docs/delete-by-query.asciidoc | 2 +- docs/reference/docs/reindex.asciidoc | 2 +- docs/reference/docs/update-by-query.asciidoc | 2 +- docs/reference/ingest/ingest-node.asciidoc | 4 ++-- docs/reference/modules/scripting/engine.asciidoc | 2 +- docs/reference/query-dsl/percolate-query.asciidoc | 2 +- docs/reference/setup/upgrade/upgrade-node.asciidoc | 2 +- .../org/elasticsearch/tribe/TribeIntegrationTests.java | 2 +- .../org/elasticsearch/cloud/gce/GceInstancesService.java | 2 +- .../ingest/geoip/GeoIpProcessorFactoryTests.java | 2 +- qa/vagrant/src/test/resources/packaging/utils/utils.bash | 2 +- .../test/transport/MockTransportService.java | 7 ++++--- 31 files changed, 43 insertions(+), 42 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java 
b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index fa3b16ef0d579..69ba6db63ef07 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -60,7 +60,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr private static final String FORCED_REFRESH = "forced_refresh"; /** - * An enum that represents the the results of CRUD operations, primarily used to communicate the type of + * An enum that represents the results of CRUD operations, primarily used to communicate the type of * operation that occurred. */ public enum Result implements Writeable { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index a203dd35b47ff..de5fcf9345d23 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -110,7 +110,7 @@ private void buildTaskGroups() { // we found parent in the list of tasks - add it to the parent list parentTask.addGroup(taskGroup); } else { - // we got zombie or the parent was filtered out - add it to the the top task list + // we got zombie or the parent was filtered out - add it to the top task list topLevelTasks.add(taskGroup); } } else { diff --git a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java index 2e442e2cc141c..d834d80338432 100644 --- a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java +++ b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java @@ -64,7 +64,7 @@ public boolean needToCheck() { /** * Should the index be auto created? 
- * @throws IndexNotFoundException if the the index doesn't exist and shouldn't be auto created + * @throws IndexNotFoundException if the index doesn't exist and shouldn't be auto created */ public boolean shouldAutoCreate(String index, ClusterState state) { if (resolver.hasIndexOrAlias(index, state)) { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index f28beed1d7fac..1b4fd20140dad 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -343,7 +343,7 @@ public interface Primary< public interface Replicas> { /** - * Performs the the specified request on the specified replica. + * Performs the specified request on the specified replica. * * @param replica the shard this request should be executed on * @param replicaRequest the operation to perform diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 283982446dd08..7940fe1bc7045 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -270,7 +270,7 @@ public String getId() { } /** - * The unique ephemeral id of the node. Ephemeral ids are meant to be attached the the life span + * The unique ephemeral id of the node. Ephemeral ids are meant to be attached the life span * of a node process. When ever a node is restarted, it's ephemeral id is required to change (while it's {@link #getId()} * will be read from the data folder and will remain the same across restarts). 
Since all node attributes and addresses * are maintained during the life span of a node process, we can (and are) using the ephemeralId in diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 4376980eca8a9..f8d42b3d8f5a0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -44,9 +44,7 @@ import java.util.Locale; import java.util.Map; import java.util.Optional; -import java.util.OptionalDouble; import java.util.Set; -import java.util.stream.Collectors; import static java.util.Collections.emptyMap; @@ -402,7 +400,7 @@ private static class NodeRankComparator implements Comparator { @Override public int compare(ShardRouting s1, ShardRouting s2) { if (s1.currentNodeId().equals(s2.currentNodeId())) { - // these shards on the the same node + // these shards on the same node return 0; } Double shard1rank = nodeRanks.get(s1.currentNodeId()); diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java index e28b44b42c5a2..77d62f8d3095a 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java @@ -39,7 +39,7 @@ private XContentParserUtils() { } /** - * Makes sure that current token is of type {@link XContentParser.Token#FIELD_NAME} and the the field name is equal to the provided one + * Makes sure that current token is of type {@link XContentParser.Token#FIELD_NAME} and the field name is equal to the provided one * @throws ParsingException if the token is not of type {@link XContentParser.Token#FIELD_NAME} or is not equal to the given field name */ public static void 
ensureFieldName(XContentParser parser, Token token, String fieldName) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java b/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java index 1c2fa3005207d..f3d9618838f7b 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java +++ b/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java @@ -34,7 +34,7 @@ class VersionValue implements Accountable { /** the seq number of the operation that last changed the associated uuid */ final long seqNo; - /** the the term of the operation that last changed the associated uuid */ + /** the term of the operation that last changed the associated uuid */ final long term; VersionValue(long version, long seqNo, long term) { diff --git a/core/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java b/core/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java index 6c420d64d84be..9c1795d654ccc 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -42,7 +41,7 @@ public class SeqNoStats implements ToXContentFragment, Writeable { public SeqNoStats(long maxSeqNo, long localCheckpoint, long globalCheckpoint) { assert localCheckpoint <= maxSeqNo: "local checkpoint [" + localCheckpoint + "] is above maximum seq no [" + maxSeqNo + "]"; - // note that the the global checkpoint can be higher from both maxSeqNo and localCheckpoint + // note that the global checkpoint can be higher from both maxSeqNo and localCheckpoint // as we use this stats 
object to describe lucene commits as well as live statistic. this.maxSeqNo = maxSeqNo; this.localCheckpoint = localCheckpoint; diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 9845064c4df4a..34ed1b4ce9e35 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -609,6 +609,7 @@ private void verifyRelocatingState() { } } + @Override public IndexShardState state() { return state; } @@ -1346,6 +1347,7 @@ public RecoveryStats recoveryStats() { * Returns the current {@link RecoveryState} if this shard is recovering or has been recovering. * Returns null if the recovery has not yet started or shard was not recovered (created via an API). */ + @Override public RecoveryState recoveryState() { return this.recoveryState; } @@ -1757,7 +1759,7 @@ public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final S * case that the global checkpoint update from the primary is ahead of the local checkpoint on this shard. In this case, we * ignore the global checkpoint update. This can happen if we are in the translog stage of recovery. Prior to this, the engine * is not opened and this shard will not receive global checkpoint updates, and after this the shard will be contributing to - * calculations of the the global checkpoint. However, we can not assert that we are in the translog stage of recovery here as + * calculations of the global checkpoint. However, we can not assert that we are in the translog stage of recovery here as * while the global checkpoint update may have emanated from the primary when we were in that state, we could subsequently move * to recovery finalization, or even finished recovery before the update arrives here. 
*/ diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java index ac3459b78e9a3..3f6d443aa8009 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java @@ -183,7 +183,7 @@ private void releaseDelayedOperations() { * - blockOperations can be called on a recovery thread which can be expected to be interrupted when recovery is cancelled; * interruptions are bad here as permit acquisition will throw an interrupted exception which will be swallowed by * the threaded action listener if the queue of the thread pool on which it submits is full - * - if a permit is acquired and the queue of the thread pool which the the threaded action listener uses is full, the + * - if a permit is acquired and the queue of the thread pool which the threaded action listener uses is full, the * onFailure handler is executed on the calling thread; this should not be the recovery thread as it would delay the * recovery */ diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 2998041d8a374..652ec3dda3d29 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -272,7 +272,7 @@ public FactoryType compile(Script script, ScriptContext The version is optional, but useful in certain cases. We can ensure that we are trying to percolate the document we just have indexed. A change may be made after we have indexed, and if that is the -case the then the search request would fail with a version conflict error. +case the search request would fail with a version conflict error. The search response returned is identical as in the previous example. 
diff --git a/docs/reference/setup/upgrade/upgrade-node.asciidoc b/docs/reference/setup/upgrade/upgrade-node.asciidoc index 67b877cebc6f0..db9d352e83184 100644 --- a/docs/reference/setup/upgrade/upgrade-node.asciidoc +++ b/docs/reference/setup/upgrade/upgrade-node.asciidoc @@ -9,7 +9,7 @@ To upgrade using a zip or compressed tarball: .. Extract the zip or tarball to a _new_ directory. This is critical if you are not using external `config` and `data` directories. -.. Set the the `ES_PATH_CONF` environment variable to specify the location of +.. Set the `ES_PATH_CONF` environment variable to specify the location of your external `config` directory and `jvm.options` file. If you are not using an external `config` directory, copy your old configuration over to the new installation. diff --git a/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeIntegrationTests.java b/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeIntegrationTests.java index cd86d7216a74a..9957ad6bae9ee 100644 --- a/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeIntegrationTests.java +++ b/modules/tribe/src/test/java/org/elasticsearch/tribe/TribeIntegrationTests.java @@ -110,7 +110,7 @@ public class TribeIntegrationTests extends ESIntegTestCase { private static final Predicate CLUSTER2_ONLY = c -> c.getClusterName().equals(cluster2.getClusterName()); /** - * A predicate that is used to select the the two remote clusters + * A predicate that is used to select the two remote clusters **/ private static final Predicate ALL = c -> true; diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesService.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesService.java index 6f7313051fea2..4c90fd5c3731b 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesService.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesService.java @@ -65,7 +65,7 @@ public 
interface GceInstancesService { /** * cloud.gce.max_wait: How long exponential backoff should retry before definitely failing. - * It's a total time since the the initial call is made. + * It's a total time since the initial call is made. * A negative value will retry indefinitely. Defaults to `-1s` (retry indefinitely). */ Setting MAX_WAIT_SETTING = diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index 3904b043a5255..d76056cac3563 100644 --- a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -231,7 +231,7 @@ public void testLazyLoading() throws Exception { Files.copy(new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-Country.mmdb.gz")), geoIpConfigDir.resolve("GeoLite2-Country.mmdb.gz")); - // Loading another database reader instances, because otherwise we can't test lazy loading as the the + // Loading another database reader instances, because otherwise we can't test lazy loading as the // database readers used at class level are reused between tests. (we want to keep that otherwise running this // test will take roughly 4 times more time) Map databaseReaders = diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash index 58d49558b1be0..dc5238d03f46b 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash @@ -1,6 +1,6 @@ #!/bin/bash -# This file contains some utilities to test the the .deb/.rpm +# This file contains some utilities to test the .deb/.rpm # packages and the SysV/Systemd scripts. 
# WARNING: This testing file must be executed as root and can diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 0979cfbfea2b7..c94e9aa46a689 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -20,6 +20,7 @@ package org.elasticsearch.test.transport; import com.carrotsearch.randomizedtesting.SysGlobals; + import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -126,7 +127,7 @@ public static MockTransportService createNewService(Settings settings, Transport /** * Build the service. * - * @param clusterSettings if non null the the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings + * @param clusterSettings if non null the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. */ public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor interceptor, @@ -139,7 +140,7 @@ public MockTransportService(Settings settings, Transport transport, ThreadPool t /** * Build the service. * - * @param clusterSettings if non null the the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings + * @param clusterSettings if non null the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. 
*/ public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor interceptor, @@ -345,7 +346,7 @@ public void addUnresponsiveRule(TransportAddress transportAddress, final TimeVal final long startTime = System.currentTimeMillis(); addDelegate(transportAddress, new ClearableTransport(original) { - private final Queue requestsToSendWhenCleared = new LinkedBlockingDeque(); + private final Queue requestsToSendWhenCleared = new LinkedBlockingDeque<>(); private boolean cleared = false; TimeValue getDelay() { From 59600dfe2da47dc8a30bf2f601ff6ead72d3bc78 Mon Sep 17 00:00:00 2001 From: Bernd Date: Thu, 14 Sep 2017 15:34:07 +0200 Subject: [PATCH 33/67] [Docs] Correct typo in removal_of_types.asciidoc (#26646) --- docs/reference/mapping/removal_of_types.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc index e47d130aa3ac6..006bc789f3084 100644 --- a/docs/reference/mapping/removal_of_types.asciidoc +++ b/docs/reference/mapping/removal_of_types.asciidoc @@ -103,7 +103,7 @@ larger number of primary shards for `tweets`. ==== Custom type field Of course, there is a limit to how many primary shards can exist in a cluster -so you many not want to waste an entire shard for a collection of only a few +so you may not want to waste an entire shard for a collection of only a few thousand documents. In this case, you can implement your own custom `type` field which will work in a similar way to the old `_type`. 
From e69c39a60f8dbac13313eba78097898840099131 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Thu, 14 Sep 2017 11:05:46 -0500 Subject: [PATCH 34/67] Add missing catch arguments to the rest api spec (#26536) --- .../rest-api-spec/test/README.asciidoc | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index db33513962fae..c822b665aebfc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -163,12 +163,17 @@ be caught and tested. For instance: The argument to `catch` can be any of: [horizontal] -`missing`:: a 404 response from ES -`conflict`:: a 409 response from ES -`request`:: a generic error response from ES -`param`:: a client-side error indicating an unknown parameter has been passed - to the method -`/foo bar/`:: the text of the error message matches this regular expression +`unauthorized`:: a 401 response from ES +`forbidden`:: a 403 response from ES +`missing`:: a 404 response from ES +`request_timeout`:: a 408 response from ES +`conflict`:: a 409 response from ES +`request`:: a 4xx-5xx error response from ES, not equal to any named response + above +`unavailable`:: a 503 response from ES +`param`:: a client-side error indicating an unknown parameter has been passed + to the method +`/foo bar/`:: the text of the error message matches this regular expression If `catch` is specified, then the `response` var must be cleared, and the test should fail if no error is thrown. 
From 1ca0b5e9e46e29b884a32b45f52fb08d07f7cbf2 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 14 Sep 2017 21:25:02 +0300 Subject: [PATCH 35/67] Introduce a History UUID as a requirement for ops based recovery (#26577) The new ops based recovery, introduce as part of #10708, is based on the assumption that all operations below the global checkpoint known to the replica do not need to be synced with the primary. This is based on the guarantee that all ops below it are available on primary and they are equal. Under normal operations this guarantee holds. Sadly, it can be violated when a primary is restored from an old snapshot. At the point the restore primary can miss operations below the replica's global checkpoint, or even worse may have total different operations at the same spot. This PR introduces the notion of a history uuid to be able to capture the difference with the restored primary (in a follow up PR). The History UUID is generated by a primary when it is first created and is synced to the replicas which are recovered via a file based recovery. The PR adds a requirement to ops based recovery to make sure that the history uuid of the source and the target are equal. Under normal operations, all shard copies will stay with that history uuid for the rest of the index lifetime and thus this is a noop. However, it gives us a place to guarantee we fall back to file base syncing in special events like a restore from snapshot (to be done as a follow up) and when someone calls the truncate translog command which can go wrong when combined with primary recovery (this is done in this PR). We considered in the past to use the translog uuid for this function (i.e., sync it across copies) and thus avoid adding an extra identifier. This idea was rejected as it removes the ability to verify that a specific translog really belongs to a specific lucene index. We also feel that having a history uuid will serve us well in the future. 
--- build.gradle | 2 +- .../elasticsearch/index/engine/Engine.java | 4 + .../index/engine/InternalEngine.java | 95 +++++++++---- .../elasticsearch/index/shard/IndexShard.java | 4 + .../index/shard/StoreRecovery.java | 5 +- .../org/elasticsearch/index/store/Store.java | 15 ++ .../translog/TruncateTranslogCommand.java | 133 ++++++++++-------- .../recovery/RecoverySourceHandler.java | 12 +- .../recovery/StartRecoveryRequest.java | 2 + .../index/engine/InternalEngineTests.java | 38 +++++ .../ESIndexLevelReplicationTestCase.java | 2 +- .../translog/TranslogDeletionPolicyTests.java | 4 +- .../index/translog/TranslogVersionTests.java | 2 +- .../index/translog/TruncateTranslogIT.java | 10 +- .../PeerRecoverySourceServiceTests.java | 5 +- .../PeerRecoveryTargetServiceTests.java | 9 +- .../recovery/RecoverySourceHandlerTests.java | 77 ++++------ .../indices/recovery/RecoveryTests.java | 77 +++++++++- .../recovery/StartRecoveryRequestTests.java | 8 +- docs/reference/indices/flush.asciidoc | 2 + .../upgrades/FullClusterRestartIT.java | 35 +++++ 21 files changed, 385 insertions(+), 156 deletions(-) diff --git a/build.gradle b/build.gradle index cfc8401a934e0..7b1e517a8586b 100644 --- a/build.gradle +++ b/build.gradle @@ -186,7 +186,7 @@ task verifyVersions { * after the backport of the backcompat code is complete. 
*/ allprojects { - ext.bwc_tests_enabled = true + ext.bwc_tests_enabled = false } task verifyBwcTestsEnabled { diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 9b304de6077fc..a755044c11334 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -95,6 +95,7 @@ public abstract class Engine implements Closeable { public static final String SYNC_COMMIT_ID = "sync_id"; + public static final String HISTORY_UUID_KEY = "history_uuid"; protected final ShardId shardId; protected final String allocationId; @@ -183,6 +184,9 @@ public MergeStats getMergeStats() { return new MergeStats(); } + /** returns the history uuid for the engine */ + public abstract String getHistoryUUID(); + /** * A throttling class that can be activated, causing the * {@code acquireThrottle} method to block on a lock when throttling diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index e1bf949f50eab..d7cf3e16069e1 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -48,6 +48,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lucene.LoggerInfoStream; import org.elasticsearch.common.lucene.Lucene; @@ -142,6 +143,8 @@ public class InternalEngine extends Engine { private final CounterMetric numVersionLookups = new CounterMetric(); private final CounterMetric numIndexVersionsLookups = new CounterMetric(); + @Nullable + private final String historyUUID; public InternalEngine(EngineConfig engineConfig) throws 
EngineException { super(engineConfig); @@ -174,15 +177,23 @@ public InternalEngine(EngineConfig engineConfig) throws EngineException { switch (openMode) { case OPEN_INDEX_AND_TRANSLOG: writer = createWriter(false); + String existingHistoryUUID = loadHistoryUUIDFromCommit(writer); + if (existingHistoryUUID == null) { + historyUUID = UUIDs.randomBase64UUID(); + } else { + historyUUID = existingHistoryUUID; + } final long globalCheckpoint = Translog.readGlobalCheckpoint(engineConfig.getTranslogConfig().getTranslogPath()); seqNoStats = store.loadSeqNoStats(globalCheckpoint); break; case OPEN_INDEX_CREATE_TRANSLOG: writer = createWriter(false); + historyUUID = loadHistoryUUIDFromCommit(writer); seqNoStats = store.loadSeqNoStats(SequenceNumbers.UNASSIGNED_SEQ_NO); break; case CREATE_INDEX_AND_TRANSLOG: writer = createWriter(true); + historyUUID = UUIDs.randomBase64UUID(); seqNoStats = new SeqNoStats( SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, @@ -342,6 +353,12 @@ private void recoverFromTranslogInternal() throws IOException { flush(true, true); } else if (translog.isCurrent(translogGeneration) == false) { commitIndexWriter(indexWriter, translog, lastCommittedSegmentInfos.getUserData().get(Engine.SYNC_COMMIT_ID)); + refreshLastCommittedSegmentInfos(); + } else if (lastCommittedSegmentInfos.getUserData().containsKey(HISTORY_UUID_KEY) == false) { + assert historyUUID != null; + // put the history uuid into the index + commitIndexWriter(indexWriter, translog, lastCommittedSegmentInfos.getUserData().get(Engine.SYNC_COMMIT_ID)); + refreshLastCommittedSegmentInfos(); } // clean up what's not needed translog.trimUnreferencedReaders(); @@ -382,6 +399,11 @@ public Translog getTranslog() { return translog; } + @Override + public String getHistoryUUID() { + return historyUUID; + } + /** * Reads the current stored translog ID from the IW commit data. If the id is not found, recommits the current * translog id into lucene and returns null. 
@@ -401,6 +423,19 @@ private String loadTranslogUUIDFromCommit(IndexWriter writer) throws IOException } } + /** + * Reads the current stored history ID from the IW commit data. If the id is not found, returns null. + */ + @Nullable + private String loadHistoryUUIDFromCommit(final IndexWriter writer) throws IOException { + String uuid = commitDataAsMap(writer).get(HISTORY_UUID_KEY); + if (uuid == null) { + assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1) : + "index was created after 6_0_0_rc1 but has no history uuid"; + } + return uuid; + } + private SearcherManager createSearcherManager() throws EngineException { boolean success = false; SearcherManager searcherManager = null; @@ -1312,30 +1347,8 @@ public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineExcepti } catch (Exception e) { throw new FlushFailedEngineException(shardId, e); } - /* - * we have to inc-ref the store here since if the engine is closed by a tragic event - * we don't acquire the write lock and wait until we have exclusive access. This might also - * dec the store reference which can essentially close the store and unless we can inc the reference - * we can't use it. 
- */ - store.incRef(); - try { - // reread the last committed segment infos - lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); - } catch (Exception e) { - if (isClosed.get() == false) { - try { - logger.warn("failed to read latest segment infos on flush", e); - } catch (Exception inner) { - e.addSuppressed(inner); - } - if (Lucene.isCorruptionException(e)) { - throw new FlushFailedEngineException(shardId, e); - } - } - } finally { - store.decRef(); - } + refreshLastCommittedSegmentInfos(); + } newCommitId = lastCommittedSegmentInfos.getId(); } catch (FlushFailedEngineException ex) { @@ -1353,6 +1366,33 @@ public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineExcepti return new CommitId(newCommitId); } + private void refreshLastCommittedSegmentInfos() { + /* + * we have to inc-ref the store here since if the engine is closed by a tragic event + * we don't acquire the write lock and wait until we have exclusive access. This might also + * dec the store reference which can essentially close the store and unless we can inc the reference + * we can't use it. 
+ */ + store.incRef(); + try { + // reread the last committed segment infos + lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); + } catch (Exception e) { + if (isClosed.get() == false) { + try { + logger.warn("failed to read latest segment infos on flush", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + if (Lucene.isCorruptionException(e)) { + throw new FlushFailedEngineException(shardId, e); + } + } + } finally { + store.decRef(); + } + } + @Override public void rollTranslogGeneration() throws EngineException { try (ReleasableLock ignored = readLock.acquire()) { @@ -1874,7 +1914,7 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl * {@link IndexWriter#commit()} call flushes all documents, we defer computation of the maximum sequence number to the time * of invocation of the commit data iterator (which occurs after all documents have been flushed to Lucene). */ - final Map commitData = new HashMap<>(5); + final Map commitData = new HashMap<>(6); commitData.put(Translog.TRANSLOG_GENERATION_KEY, translogFileGeneration); commitData.put(Translog.TRANSLOG_UUID_KEY, translogUUID); commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, localCheckpointValue); @@ -1883,6 +1923,9 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl } commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(seqNoService().getMaxSeqNo())); commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get())); + if (historyUUID != null) { + commitData.put(HISTORY_UUID_KEY, historyUUID); + } logger.trace("committing writer with commit data [{}]", commitData); return commitData.entrySet().iterator(); }); @@ -1992,7 +2035,7 @@ public boolean isRecovering() { * Gets the commit data from {@link IndexWriter} as a map. 
*/ private static Map commitDataAsMap(final IndexWriter indexWriter) { - Map commitData = new HashMap<>(5); + Map commitData = new HashMap<>(6); for (Map.Entry entry : indexWriter.getLiveCommitData()) { commitData.put(entry.getKey(), entry.getValue()); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 34ed1b4ce9e35..dd47be5a141dd 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1585,6 +1585,10 @@ public Translog getTranslog() { return getEngine().getTranslog(); } + public String getHistoryUUID() { + return getEngine().getHistoryUUID(); + } + public IndexEventListener getIndexEventListener() { return indexEventListener; } diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 078e8b06d6e20..63b7bc0805581 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -35,10 +35,12 @@ import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.mapper.MapperService; @@ -162,10 +164,11 @@ void addIndices( * document-level semantics. 
*/ writer.setLiveCommitData(() -> { - final HashMap liveCommitData = new HashMap<>(2); + final HashMap liveCommitData = new HashMap<>(4); liveCommitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); liveCommitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(maxSeqNo)); liveCommitData.put(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp)); + liveCommitData.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); return liveCommitData.entrySet().iterator(); }); writer.commit(); diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index 6700a005c9c96..fa992e12ef220 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -79,6 +79,7 @@ import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; import java.io.Closeable; import java.io.EOFException; @@ -1027,6 +1028,20 @@ public Map getCommitUserData() { return commitUserData; } + /** + * returns the history uuid the store points at, or null if not existant. + */ + public String getHistoryUUID() { + return commitUserData.get(Engine.HISTORY_UUID_KEY); + } + + /** + * returns the translog uuid the store points at + */ + public String getTranslogUUID() { + return commitUserData.get(Translog.TRANSLOG_UUID_KEY); + } + /** * Returns true iff this metadata contains the given file. 
*/ diff --git a/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java b/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java index 325f840bd7c30..d9b77f841ed09 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java @@ -25,6 +25,8 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.Lock; @@ -37,9 +39,11 @@ import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.seqno.SequenceNumbers; import java.io.IOException; @@ -51,6 +55,7 @@ import java.nio.file.StandardCopyOption; import java.nio.file.StandardOpenOption; import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -101,64 +106,82 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th if (Files.exists(idxLocation) == false || Files.isDirectory(idxLocation) == false) { throw new ElasticsearchException("unable to find a shard at [" + idxLocation + "], which must exist and be a directory"); } - - // Hold the lock open for the duration of the tool running - try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE); - Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { - Set 
translogFiles; - try { - terminal.println("Checking existing translog files"); - translogFiles = filesInDirectory(translogPath); - } catch (IOException e) { - terminal.println("encountered IOException while listing directory, aborting..."); - throw new ElasticsearchException("failed to find existing translog files", e); - } - - // Warn about ES being stopped and files being deleted - warnAboutDeletingFiles(terminal, translogFiles, batch); - - List commits; - try { - terminal.println("Reading translog UUID information from Lucene commit from shard at [" + idxLocation + "]"); - commits = DirectoryReader.listCommits(dir); - } catch (IndexNotFoundException infe) { - throw new ElasticsearchException("unable to find a valid shard at [" + idxLocation + "]", infe); - } - - // Retrieve the generation and UUID from the existing data - Map commitData = commits.get(commits.size() - 1).getUserData(); - String translogGeneration = commitData.get(Translog.TRANSLOG_GENERATION_KEY); - String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY); - if (translogGeneration == null || translogUUID == null) { - throw new ElasticsearchException("shard must have a valid translog generation and UUID but got: [{}] and: [{}]", + try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE)) { + final String historyUUID = UUIDs.randomBase64UUID(); + final Map commitData; + // Hold the lock open for the duration of the tool running + try (Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { + Set translogFiles; + try { + terminal.println("Checking existing translog files"); + translogFiles = filesInDirectory(translogPath); + } catch (IOException e) { + terminal.println("encountered IOException while listing directory, aborting..."); + throw new ElasticsearchException("failed to find existing translog files", e); + } + + // Warn about ES being stopped and files being deleted + warnAboutDeletingFiles(terminal, translogFiles, batch); + + List commits; + try { + 
terminal.println("Reading translog UUID information from Lucene commit from shard at [" + idxLocation + "]"); + commits = DirectoryReader.listCommits(dir); + } catch (IndexNotFoundException infe) { + throw new ElasticsearchException("unable to find a valid shard at [" + idxLocation + "]", infe); + } + + // Retrieve the generation and UUID from the existing data + commitData = commits.get(commits.size() - 1).getUserData(); + String translogGeneration = commitData.get(Translog.TRANSLOG_GENERATION_KEY); + String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY); + if (translogGeneration == null || translogUUID == null) { + throw new ElasticsearchException("shard must have a valid translog generation and UUID but got: [{}] and: [{}]", translogGeneration, translogUUID); + } + terminal.println("Translog Generation: " + translogGeneration); + terminal.println("Translog UUID : " + translogUUID); + terminal.println("History UUID : " + historyUUID); + + Path tempEmptyCheckpoint = translogPath.resolve("temp-" + Translog.CHECKPOINT_FILE_NAME); + Path realEmptyCheckpoint = translogPath.resolve(Translog.CHECKPOINT_FILE_NAME); + Path tempEmptyTranslog = translogPath.resolve("temp-" + Translog.TRANSLOG_FILE_PREFIX + + translogGeneration + Translog.TRANSLOG_FILE_SUFFIX); + Path realEmptyTranslog = translogPath.resolve(Translog.TRANSLOG_FILE_PREFIX + + translogGeneration + Translog.TRANSLOG_FILE_SUFFIX); + + // Write empty checkpoint and translog to empty files + long gen = Long.parseLong(translogGeneration); + int translogLen = writeEmptyTranslog(tempEmptyTranslog, translogUUID); + writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen); + + terminal.println("Removing existing translog files"); + IOUtils.rm(translogFiles.toArray(new Path[]{})); + + terminal.println("Creating new empty checkpoint at [" + realEmptyCheckpoint + "]"); + Files.move(tempEmptyCheckpoint, realEmptyCheckpoint, StandardCopyOption.ATOMIC_MOVE); + terminal.println("Creating new empty translog at [" 
+ realEmptyTranslog + "]"); + Files.move(tempEmptyTranslog, realEmptyTranslog, StandardCopyOption.ATOMIC_MOVE); + + // Fsync the translog directory after rename + IOUtils.fsync(translogPath, true); } - terminal.println("Translog Generation: " + translogGeneration); - terminal.println("Translog UUID : " + translogUUID); - - Path tempEmptyCheckpoint = translogPath.resolve("temp-" + Translog.CHECKPOINT_FILE_NAME); - Path realEmptyCheckpoint = translogPath.resolve(Translog.CHECKPOINT_FILE_NAME); - Path tempEmptyTranslog = translogPath.resolve("temp-" + Translog.TRANSLOG_FILE_PREFIX + - translogGeneration + Translog.TRANSLOG_FILE_SUFFIX); - Path realEmptyTranslog = translogPath.resolve(Translog.TRANSLOG_FILE_PREFIX + - translogGeneration + Translog.TRANSLOG_FILE_SUFFIX); - - // Write empty checkpoint and translog to empty files - long gen = Long.parseLong(translogGeneration); - int translogLen = writeEmptyTranslog(tempEmptyTranslog, translogUUID); - writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen); - - terminal.println("Removing existing translog files"); - IOUtils.rm(translogFiles.toArray(new Path[]{})); - - terminal.println("Creating new empty checkpoint at [" + realEmptyCheckpoint + "]"); - Files.move(tempEmptyCheckpoint, realEmptyCheckpoint, StandardCopyOption.ATOMIC_MOVE); - terminal.println("Creating new empty translog at [" + realEmptyTranslog + "]"); - Files.move(tempEmptyTranslog, realEmptyTranslog, StandardCopyOption.ATOMIC_MOVE); - - // Fsync the translog directory after rename - IOUtils.fsync(translogPath, true); + terminal.println("Marking index with the new history uuid"); + // commit the new histroy id + IndexWriterConfig iwc = new IndexWriterConfig(null) + .setCommitOnClose(false) + // we don't want merges to happen here - we call maybe merge on the engine + // later once we stared it up otherwise we would need to wait for it here + // we also don't specify a codec here and merges should use the engines for this index + 
.setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(IndexWriterConfig.OpenMode.APPEND); + try (IndexWriter writer = new IndexWriter(dir, iwc)) { + Map newCommitData = new HashMap<>(commitData); + newCommitData.put(Engine.HISTORY_UUID_KEY, historyUUID); + writer.setLiveCommitData(newCommitData.entrySet()); + writer.commit(); + } } catch (LockObtainFailedException lofe) { throw new ElasticsearchException("Failed to lock shard's directory at [" + idxLocation + "], is Elasticsearch still running?"); } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 73ab31975684c..70e1ba06b07e3 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -31,6 +31,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; @@ -147,8 +148,8 @@ public RecoveryResponse recoverToTarget() throws IOException { final Translog translog = shard.getTranslog(); final long startingSeqNo; - boolean isSequenceNumberBasedRecoveryPossible = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && - isTranslogReadyForSequenceNumberBasedRecovery(); + final boolean isSequenceNumberBasedRecoveryPossible = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && + isTargetSameHistory() && isTranslogReadyForSequenceNumberBasedRecovery(); if (isSequenceNumberBasedRecoveryPossible) { logger.trace("performing sequence numbers based recovery. 
starting at [{}]", request.startingSeqNo()); @@ -198,6 +199,13 @@ public RecoveryResponse recoverToTarget() throws IOException { return response; } + private boolean isTargetSameHistory() { + final String targetHistoryUUID = request.metadataSnapshot().getHistoryUUID(); + assert targetHistoryUUID != null || shard.indexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1) : + "incoming target history N/A but index was created after or on 6.0.0-rc1"; + return targetHistoryUUID != null && targetHistoryUUID.equals(shard.getHistoryUUID()); + } + private void runUnderPrimaryPermit(CancellableThreads.Interruptable runnable) { cancellableThreads.execute(() -> { final PlainActionFuture onAcquired = new PlainActionFuture<>(); diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java index 57ea19ff298d9..cfdaddabdf15b 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java @@ -75,6 +75,8 @@ public StartRecoveryRequest(final ShardId shardId, this.metadataSnapshot = metadataSnapshot; this.primaryRelocation = primaryRelocation; this.startingSeqNo = startingSeqNo; + assert startingSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || metadataSnapshot.getHistoryUUID() != null : + "starting seq no is set but not history uuid"; } public long recoveryId() { diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 69e1631d7db54..0ea47392d5c21 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -2810,6 +2810,44 @@ public void testRecoverFromForeignTranslog() throws IOException { assertVisibleCount(engine, numDocs, 
false); } + public void testRecoverFromStoreSetsHistoryUUIDIfNeeded() throws IOException { + final int numDocs = randomIntBetween(0, 3); + for (int i = 0; i < numDocs; i++) { + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); + Engine.IndexResult index = engine.index(firstIndexRequest); + assertThat(index.getVersion(), equalTo(1L)); + } + assertVisibleCount(engine, numDocs); + engine.close(); + + IndexWriterConfig iwc = new IndexWriterConfig(null) + .setCommitOnClose(false) + // we don't want merges to happen here - we call maybe merge on the engine + // later once we stared it up otherwise we would need to wait for it here + // we also don't specify a codec here and merges should use the engines for this index + .setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(IndexWriterConfig.OpenMode.APPEND); + try (IndexWriter writer = new IndexWriter(store.directory(), iwc)) { + Map newCommitData = new HashMap<>(); + for (Map.Entry entry: writer.getLiveCommitData()) { + if (entry.getKey().equals(Engine.HISTORY_UUID_KEY) == false) { + newCommitData.put(entry.getKey(), entry.getValue()); + } + } + writer.setLiveCommitData(newCommitData.entrySet()); + writer.commit(); + } + + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0_beta1) + .build()); + engine = createEngine(indexSettings, store, primaryTranslogDir, newMergePolicy(), null); + assertVisibleCount(engine, numDocs, false); + assertThat(engine.getHistoryUUID(), notNullValue()); + } + public void testShardNotAvailableExceptionWhenEngineClosedConcurrently() throws IOException, InterruptedException { AtomicReference 
exception = new AtomicReference<>(); String operation = randomFrom("optimize", "refresh", "flush"); diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index c38d3434c3b8f..93ebb319063f1 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -315,7 +315,7 @@ private synchronized IndexShardRoutingTable routingTable(Function, TranslogWriter> createReadersAndWriter(final TranslogWriter writer = null; List readers = new ArrayList<>(); final int numberOfReaders = randomIntBetween(0, 10); + final String translogUUID = UUIDs.randomBase64UUID(random()); for (long gen = 1; gen <= numberOfReaders + 1; gen++) { if (writer != null) { final TranslogReader reader = Mockito.spy(writer.closeIntoReader()); Mockito.doReturn(writer.getLastModifiedTime()).when(reader).getLastModifiedTime(); readers.add(reader); } - writer = TranslogWriter.create(new ShardId("index", "uuid", 0), "translog_uuid", gen, + writer = TranslogWriter.create(new ShardId("index", "uuid", 0), translogUUID, gen, tempDir.resolve(Translog.getFilename(gen)), FileChannel::open, TranslogConfig.DEFAULT_BUFFER_SIZE, () -> 1L, 1L, () -> 1L ); writer = Mockito.spy(writer); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java index 46761698610a5..d57373ebfe349 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java @@ -89,7 +89,7 @@ public TranslogReader openReader(final Path path, final long id) throws IOExcept final long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED; final long maxSeqNo = 
SequenceNumbers.NO_OPS_PERFORMED; final Checkpoint checkpoint = - new Checkpoint(Files.size(path), 1, id, minSeqNo, maxSeqNo, SequenceNumbers.UNASSIGNED_SEQ_NO, id); + new Checkpoint(Files.size(path), 1, id, minSeqNo, maxSeqNo, SequenceNumbers.UNASSIGNED_SEQ_NO, id); return TranslogReader.open(channel, path, checkpoint, null); } } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java b/core/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java index 60434d95e6209..c2b394b219a20 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TruncateTranslogIT.java @@ -77,7 +77,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.notNullValue; @@ -144,16 +143,12 @@ public void testCorruptTranslogTruncation() throws Exception { } } - final boolean expectSeqNoRecovery; if (randomBoolean() && numDocsToTruncate > 0) { // flush the replica, so it will have more docs than what the primary will have Index index = resolveIndex("test"); IndexShard replica = internalCluster().getInstance(IndicesService.class, replicaNode).getShardOrNull(new ShardId(index, 0)); replica.flush(new FlushRequest()); - expectSeqNoRecovery = false; - logger.info("--> ops based recovery disabled by flushing replica"); - } else { - expectSeqNoRecovery = true; + logger.info("--> performed extra flushing on replica"); } // shut down the replica node to be tested later @@ -219,8 +214,7 @@ public void testCorruptTranslogTruncation() throws Exception { final RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").setActiveOnly(false).get(); 
final RecoveryState replicaRecoveryState = recoveryResponse.shardRecoveryStates().get("test").stream() .filter(recoveryState -> recoveryState.getPrimary() == false).findFirst().get(); - assertThat(replicaRecoveryState.getIndex().toString(), replicaRecoveryState.getIndex().recoveredFileCount(), - expectSeqNoRecovery ? equalTo(0) : greaterThan(0)); + assertThat(replicaRecoveryState.getIndex().toString(), replicaRecoveryState.getIndex().recoveredFileCount(), greaterThan(0)); } public void testCorruptTranslogTruncationOfReplica() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java index b69fa1321ed37..524795bfa2480 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java @@ -21,8 +21,10 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.transport.TransportService; @@ -39,7 +41,8 @@ public void testDuplicateRecoveries() throws IOException { mock(TransportService.class), mock(IndicesService.class), new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); StartRecoveryRequest startRecoveryRequest = new StartRecoveryRequest(primary.shardId(), randomAlphaOfLength(10), - getFakeDiscoNode("source"), getFakeDiscoNode("target"), null, randomBoolean(), randomLong(), randomLong()); + getFakeDiscoNode("source"), getFakeDiscoNode("target"), Store.MetadataSnapshot.EMPTY, randomBoolean(), 
randomLong(), + SequenceNumbers.UNASSIGNED_SEQ_NO); RecoverySourceHandler handler = peerRecoverySourceService.ongoingRecoveries.addNewRecovery(startRecoveryRequest, primary); DelayRecoveryException delayRecoveryException = expectThrows(DelayRecoveryException.class, () -> peerRecoverySourceService.ongoingRecoveries.addNewRecovery(startRecoveryRequest, primary)); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index f876f6bf80dbc..835d16117ad60 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -73,11 +73,11 @@ Path translogLocation() { translogLocation.set(replica.getTranslog().location()); + final Translog translog = replica.getTranslog(); + final String translogUUID = translog.getTranslogUUID(); assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); - final Translog translog = replica.getTranslog(); - translogLocation.set( - writeTranslog(replica.shardId(), translog.getTranslogUUID(), translog.currentFileGeneration(), maxSeqNo - 1)); + translogLocation.set(writeTranslog(replica.shardId(), translogUUID, translog.currentFileGeneration(), maxSeqNo - 1)); // commit is good, global checkpoint is at least max *committed* which is NO_OPS_PERFORMED assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(0L)); @@ -89,8 +89,7 @@ Path translogLocation() { // commit is not good, global checkpoint is below max assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); - translogLocation.set( - writeTranslog(replica.shardId(), translog.getTranslogUUID(), translog.currentFileGeneration(), maxSeqNo)); + 
translogLocation.set(writeTranslog(replica.shardId(), translogUUID, translog.currentFileGeneration(), maxSeqNo)); // commit is good, global checkpoint is above max assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(localCheckpoint + 1)); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 9f280839e8638..993cc84506498 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.FileSystemUtils; @@ -96,17 +97,9 @@ public class RecoverySourceHandlerTests extends ESTestCase { public void testSendFiles() throws Throwable { Settings settings = Settings.builder().put("indices.recovery.concurrent_streams", 1). - put("indices.recovery.concurrent_small_file_streams", 1).build(); + put("indices.recovery.concurrent_small_file_streams", 1).build(); final RecoverySettings recoverySettings = new RecoverySettings(settings, service); - final StartRecoveryRequest request = new StartRecoveryRequest( - shardId, - null, - new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), - new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), - null, - randomBoolean(), - randomNonNegativeLong(), - randomBoolean() ? 
SequenceNumbers.UNASSIGNED_SEQ_NO : randomNonNegativeLong()); + final StartRecoveryRequest request = getStartRecoveryRequest(); Store store = newStore(createTempDir()); RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY); @@ -151,19 +144,26 @@ public void close() throws IOException { IOUtils.close(reader, store, targetStore); } - public void testSendSnapshotSendsOps() throws IOException { - final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service); - final int fileChunkSizeInBytes = recoverySettings.getChunkSize().bytesAsInt(); - final long startingSeqNo = randomBoolean() ? SequenceNumbers.UNASSIGNED_SEQ_NO : randomIntBetween(0, 16); - final StartRecoveryRequest request = new StartRecoveryRequest( + public StartRecoveryRequest getStartRecoveryRequest() throws IOException { + Store.MetadataSnapshot metadataSnapshot = randomBoolean() ? Store.MetadataSnapshot.EMPTY : + new Store.MetadataSnapshot(Collections.emptyMap(), + Collections.singletonMap(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()), randomIntBetween(0, 100)); + return new StartRecoveryRequest( shardId, null, new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), - null, + metadataSnapshot, randomBoolean(), randomNonNegativeLong(), - randomBoolean() ? SequenceNumbers.UNASSIGNED_SEQ_NO : randomNonNegativeLong()); + randomBoolean() || metadataSnapshot.getHistoryUUID() == null ? 
+ SequenceNumbers.UNASSIGNED_SEQ_NO : randomNonNegativeLong()); + } + + public void testSendSnapshotSendsOps() throws IOException { + final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service); + final int fileChunkSizeInBytes = recoverySettings.getChunkSize().bytesAsInt(); + final StartRecoveryRequest request = getStartRecoveryRequest(); final IndexShard shard = mock(IndexShard.class); when(shard.state()).thenReturn(IndexShardState.STARTED); final RecoveryTargetHandler recoveryTarget = mock(RecoveryTargetHandler.class); @@ -181,6 +181,7 @@ public void testSendSnapshotSendsOps() throws IOException { operations.add(new Translog.Index(index, new Engine.IndexResult(1, i - initialNumberOfDocs, true))); } operations.add(null); + final long startingSeqNo = randomBoolean() ? SequenceNumbers.UNASSIGNED_SEQ_NO : randomIntBetween(0, 16); RecoverySourceHandler.SendSnapshotResult result = handler.sendSnapshot(startingSeqNo, new Translog.Snapshot() { @Override public void close() { @@ -226,18 +227,9 @@ private Engine.Index getIndex(final String id) { public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable { Settings settings = Settings.builder().put("indices.recovery.concurrent_streams", 1). - put("indices.recovery.concurrent_small_file_streams", 1).build(); + put("indices.recovery.concurrent_small_file_streams", 1).build(); final RecoverySettings recoverySettings = new RecoverySettings(settings, service); - final StartRecoveryRequest request = - new StartRecoveryRequest( - shardId, - null, - new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), - new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), - null, - randomBoolean(), - randomNonNegativeLong(), - randomBoolean() ? 
SequenceNumbers.UNASSIGNED_SEQ_NO : 0L); + final StartRecoveryRequest request = getStartRecoveryRequest(); Path tempDir = createTempDir(); Store store = newStore(tempDir, false); AtomicBoolean failedEngine = new AtomicBoolean(false); @@ -268,8 +260,8 @@ protected void failEngine(IOException cause) { } CorruptionUtils.corruptFile(random(), FileSystemUtils.files(tempDir, (p) -> - (p.getFileName().toString().equals("write.lock") || - p.getFileName().toString().startsWith("extra")) == false)); + (p.getFileName().toString().equals("write.lock") || + p.getFileName().toString().startsWith("extra")) == false)); Store targetStore = newStore(createTempDir(), false); try { handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), (md) -> { @@ -296,18 +288,9 @@ public void close() throws IOException { public void testHandleExceptinoOnSendSendFiles() throws Throwable { Settings settings = Settings.builder().put("indices.recovery.concurrent_streams", 1). - put("indices.recovery.concurrent_small_file_streams", 1).build(); + put("indices.recovery.concurrent_small_file_streams", 1).build(); final RecoverySettings recoverySettings = new RecoverySettings(settings, service); - final StartRecoveryRequest request = - new StartRecoveryRequest( - shardId, - null, - new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), - new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), - null, - randomBoolean(), - randomNonNegativeLong(), - randomBoolean() ? 
SequenceNumbers.UNASSIGNED_SEQ_NO : 0L); + final StartRecoveryRequest request = getStartRecoveryRequest(); Path tempDir = createTempDir(); Store store = newStore(tempDir, false); AtomicBoolean failedEngine = new AtomicBoolean(false); @@ -363,17 +346,7 @@ protected void failEngine(IOException cause) { public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOException { final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service); - final boolean attemptSequenceNumberBasedRecovery = randomBoolean(); - final StartRecoveryRequest request = - new StartRecoveryRequest( - shardId, - null, - new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), - new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), - null, - false, - randomNonNegativeLong(), - attemptSequenceNumberBasedRecovery ? randomNonNegativeLong() : SequenceNumbers.UNASSIGNED_SEQ_NO); + final StartRecoveryRequest request = getStartRecoveryRequest(); final IndexShard shard = mock(IndexShard.class); when(shard.seqNoStats()).thenReturn(mock(SeqNoStats.class)); when(shard.segmentStats(anyBoolean())).thenReturn(mock(SegmentsStats.class)); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 48f0c2f839feb..e2314cff014bc 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -19,22 +19,35 @@ package org.elasticsearch.indices.recovery; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.NoMergePolicy; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.UUIDs; import 
org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.replication.ESIndexLevelReplicationTestCase; import org.elasticsearch.index.replication.RecoveryDuringReplicationTests; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogConfig; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; +import static org.elasticsearch.index.translog.TranslogDeletionPolicyTests.createTranslogDeletionPolicy; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; public class RecoveryTests extends ESIndexLevelReplicationTestCase { @@ -54,7 +67,6 @@ public void testTranslogHistoryTransferred() throws Exception { } } - public void testRetentionPolicyChangeDuringRecovery() throws Exception { try (ReplicationGroup shards = createGroup(0)) { shards.startPrimary(); @@ -132,4 +144,67 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception { assertThat(newReplica.getTranslog().totalOperations(), equalTo(translogOps)); } } + + public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception { + try (ReplicationGroup shards = createGroup(1)) { + shards.startAll(); + // index some shared docs + final int flushedDocs = 10; + final int nonFlushedDocs = randomIntBetween(0, 10); + final int numDocs = flushedDocs + nonFlushedDocs; + shards.indexDocs(flushedDocs); + shards.flush(); + shards.indexDocs(nonFlushedDocs); + + IndexShard replica = shards.getReplicas().get(0); + final 
String translogUUID = replica.getTranslog().getTranslogUUID(); + final String historyUUID = replica.getHistoryUUID(); + Translog.TranslogGeneration translogGeneration = replica.getTranslog().getGeneration(); + shards.removeReplica(replica); + replica.close("test", false); + IndexWriterConfig iwc = new IndexWriterConfig(null) + .setCommitOnClose(false) + // we don't want merges to happen here - we call maybe merge on the engine + // later once we stared it up otherwise we would need to wait for it here + // we also don't specify a codec here and merges should use the engines for this index + .setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(IndexWriterConfig.OpenMode.APPEND); + Map userData = new HashMap<>(replica.store().readLastCommittedSegmentsInfo().getUserData()); + final String translogUUIDtoUse; + final long translogGenToUse; + final String historyUUIDtoUse = UUIDs.randomBase64UUID(random()); + if (randomBoolean()) { + // create a new translog + final TranslogConfig translogConfig = + new TranslogConfig(replica.shardId(), replica.shardPath().resolveTranslog(), replica.indexSettings(), + BigArrays.NON_RECYCLING_INSTANCE); + try (Translog translog = new Translog(translogConfig, null, createTranslogDeletionPolicy(), () -> flushedDocs)) { + translogUUIDtoUse = translog.getTranslogUUID(); + translogGenToUse = translog.currentFileGeneration(); + } + } else { + translogUUIDtoUse = translogGeneration.translogUUID; + translogGenToUse = translogGeneration.translogFileGeneration; + } + try (IndexWriter writer = new IndexWriter(replica.store().directory(), iwc)) { + userData.put(Engine.HISTORY_UUID_KEY, historyUUIDtoUse); + userData.put(Translog.TRANSLOG_UUID_KEY, translogUUIDtoUse); + userData.put(Translog.TRANSLOG_GENERATION_KEY, Long.toString(translogGenToUse)); + writer.setLiveCommitData(userData.entrySet()); + writer.commit(); + } + replica.store().close(); + IndexShard newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), 
replica.routingEntry().currentNodeId()); + shards.recoverReplica(newReplica); + // file based recovery should be made + assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); + assertThat(newReplica.getTranslog().totalOperations(), equalTo(numDocs)); + + // history uuid was restored + assertThat(newReplica.getHistoryUUID(), equalTo(historyUUID)); + assertThat(newReplica.commitStats().getUserData().get(Engine.HISTORY_UUID_KEY), equalTo(historyUUID)); + + shards.assertAllEqual(numDocs); + } + } } diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java index b478243392e1b..14799687d232b 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; @@ -31,6 +32,7 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; +import java.util.Collections; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -41,6 +43,9 @@ public class StartRecoveryRequestTests extends ESTestCase { public void testSerialization() throws Exception { final Version targetNodeVersion = randomVersion(random()); + Store.MetadataSnapshot metadataSnapshot = randomBoolean() ? 
Store.MetadataSnapshot.EMPTY : + new Store.MetadataSnapshot(Collections.emptyMap(), + Collections.singletonMap(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()), randomIntBetween(0, 100)); final StartRecoveryRequest outRequest = new StartRecoveryRequest( new ShardId("test", "_na_", 0), UUIDs.randomBase64UUID(), @@ -49,7 +54,8 @@ public void testSerialization() throws Exception { Store.MetadataSnapshot.EMPTY, randomBoolean(), randomNonNegativeLong(), - randomBoolean() ? SequenceNumbers.UNASSIGNED_SEQ_NO : randomNonNegativeLong()); + randomBoolean() || metadataSnapshot.getHistoryUUID() == null ? + SequenceNumbers.UNASSIGNED_SEQ_NO : randomNonNegativeLong()); final ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); final OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index e9cac91d740cc..0c75fd011b418 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -96,6 +96,7 @@ which returns something similar to: "generation" : 2, "user_data" : { "translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA", + "history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ", "local_checkpoint" : "-1", "translog_generation" : "1", "max_seq_no" : "-1", @@ -117,6 +118,7 @@ which returns something similar to: -------------------------------------------------- // TESTRESPONSE[s/"id" : "3M3zkw2GHMo2Y4h4\/KFKCg=="/"id": $body.indices.twitter.shards.0.0.commit.id/] // TESTRESPONSE[s/"translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA"/"translog_uuid": $body.indices.twitter.shards.0.0.commit.user_data.translog_uuid/] +// TESTRESPONSE[s/"history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ"/"history_uuid": $body.indices.twitter.shards.0.0.commit.user_data.history_uuid/] // TESTRESPONSE[s/"sync_id" : "AVvFY-071siAOuFGEO9P"/"sync_id": $body.indices.twitter.shards.0.0.commit.user_data.sync_id/] // TESTRESPONSE[s/"1": \.\.\./"1": $body.indices.twitter.shards.1/] // TESTRESPONSE[s/"2": 
\.\.\./"2": $body.indices.twitter.shards.2/] diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 323ecd9ae9d22..c7e708418c92c 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; import org.junit.Before; import java.io.IOException; @@ -52,6 +53,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.notNullValue; /** * Tests to run before and after a full cluster restart. This is run twice, @@ -761,6 +763,39 @@ public void testSnapshotRestore() throws IOException { } } + public void testHistoryUUIDIsAdded() throws Exception { + if (runningAgainstOldCluster) { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + { + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("number_of_shards", 1); + mappingsAndSettings.field("number_of_replicas", 1); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + client().performRequest("PUT", "/" + index, Collections.emptyMap(), + new StringEntity(mappingsAndSettings.string(), ContentType.APPLICATION_JSON)); + } else { + Response response = client().performRequest("GET", index + "/_stats", singletonMap("level", "shards")); + List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." 
+ index + ".shards.0"); + String globalHistoryUUID = null; + for (Object shard : shardStats) { + final String nodeId = ObjectPath.evaluate(shard, "routing.node"); + final Boolean primary = ObjectPath.evaluate(shard, "routing.primary"); + logger.info("evaluating: {} , {}", ObjectPath.evaluate(shard, "routing"), ObjectPath.evaluate(shard, "commit")); + String historyUUID = ObjectPath.evaluate(shard, "commit.user_data.history_uuid"); + assertThat("no history uuid found on " + nodeId + " (primary: " + primary + ")", historyUUID, notNullValue()); + if (globalHistoryUUID == null) { + globalHistoryUUID = historyUUID; + } else { + assertThat("history uuid mismatch on " + nodeId + " (primary: " + primary + ")", historyUUID, + equalTo(globalHistoryUUID)); + } + } + } + } + private void checkSnapshot(String snapshotName, int count, Version tookOnVersion) throws IOException { // Check the snapshot metadata, especially the version String response = toStr(client().performRequest("GET", "/_snapshot/repo/" + snapshotName, listSnapshotVerboseParams())); From f385e0cf2674aca919380bd5cfa4954ce1fd80b4 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Thu, 14 Sep 2017 14:24:03 -0500 Subject: [PATCH 36/67] Add bad_request to the rest-api-spec catch params (#26539) This adds another request to the catch params. It also makes sure that the generic request param does not allow 400 either. 
--- docs/reference/api-conventions.asciidoc | 4 ++-- docs/reference/ingest.asciidoc | 2 +- docs/reference/mapping/fields/routing-field.asciidoc | 2 +- docs/reference/mapping/params/coerce.asciidoc | 4 ++-- docs/reference/mapping/params/ignore-malformed.asciidoc | 2 +- .../rest-api-spec/test/indices.analyze/10_analyze.yml | 2 +- .../test/resources/rest-api-spec/test/ingest/20_crud.yml | 2 +- .../resources/rest-api-spec/test/ingest/50_on_failure.yml | 4 ++-- .../resources/rest-api-spec/test/ingest/90_simulate.yml | 6 +++--- .../resources/rest-api-spec/test/painless/15_update.yml | 2 +- .../rest-api-spec/test/update_by_query/10_script.yml | 2 +- .../src/main/resources/rest-api-spec/test/README.asciidoc | 1 + .../rest-api-spec/test/cat.segments/10_basic.yml | 2 +- .../main/resources/rest-api-spec/test/count/10_basic.yml | 2 +- .../rest-api-spec/test/create/30_internal_version.yml | 2 +- .../rest-api-spec/test/create/35_external_version.yml | 4 ++-- .../resources/rest-api-spec/test/exists/30_parent.yml | 2 +- .../resources/rest-api-spec/test/explain/10_basic.yml | 2 +- .../main/resources/rest-api-spec/test/get/30_parent.yml | 2 +- .../resources/rest-api-spec/test/get_source/30_parent.yml | 2 +- .../resources/rest-api-spec/test/index/10_with_id.yml | 2 +- .../rest-api-spec/test/indices.delete/10_basic.yml | 4 ++-- .../rest-api-spec/test/indices.open/10_basic.yml | 2 +- .../test/indices.open/20_multiple_indices.yml | 6 +++--- .../rest-api-spec/test/indices.put_alias/10_basic.yml | 4 ++-- .../rest-api-spec/test/indices.put_template/10_basic.yml | 2 +- .../rest-api-spec/test/indices.segments/10_basic.yml | 2 +- .../rest-api-spec/test/indices.stats/10_index.yml | 2 +- .../resources/rest-api-spec/test/nodes.stats/10_basic.yml | 2 +- .../test/search.aggregation/100_avg_metric.yml | 2 +- .../test/search.aggregation/110_max_metric.yml | 2 +- .../test/search.aggregation/120_min_metric.yml | 2 +- .../test/search.aggregation/130_sum_metric.yml | 2 +- 
.../search.aggregation/180_percentiles_tdigest_metric.yml | 6 +++--- .../search.aggregation/190_percentiles_hdr_metric.yml | 8 ++++---- .../rest-api-spec/test/search/20_default_values.yml | 2 +- .../resources/rest-api-spec/test/search/issue4895.yml | 2 +- .../resources/rest-api-spec/test/search/issue9606.yml | 4 ++-- .../resources/rest-api-spec/test/update/50_parent.yml | 2 +- .../elasticsearch/test/rest/yaml/section/DoSection.java | 2 ++ 40 files changed, 57 insertions(+), 54 deletions(-) diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index 9c4d87fcb2756..d4e06b9d05248 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -602,7 +602,7 @@ invalid `size` parameter to the `_search` API: POST /twitter/_search?size=surprise_me ---------------------------------------------------------------------- // CONSOLE -// TEST[s/surprise_me/surprise_me&error_trace=false/ catch:request] +// TEST[s/surprise_me/surprise_me&error_trace=false/ catch:bad_request] // Since the test system sends error_trace=true by default we have to override The response looks like: @@ -636,7 +636,7 @@ But if you set `error_trace=true`: POST /twitter/_search?size=surprise_me&error_trace=true ---------------------------------------------------------------------- // CONSOLE -// TEST[catch:request] +// TEST[catch:bad_request] The response looks like: diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index 7150bd32739c1..95ca46b421017 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -31,7 +31,7 @@ PUT my-index/my-type/my-id?pipeline=my_pipeline_id } -------------------------------------------------- // CONSOLE -// TEST[catch:request] +// TEST[catch:bad_request] See <> for more information about creating, adding, and deleting pipelines. 
diff --git a/docs/reference/mapping/fields/routing-field.asciidoc b/docs/reference/mapping/fields/routing-field.asciidoc index 96a5de1c61605..5fd8545dece5c 100644 --- a/docs/reference/mapping/fields/routing-field.asciidoc +++ b/docs/reference/mapping/fields/routing-field.asciidoc @@ -96,7 +96,7 @@ PUT my_index2/my_type/1 <2> } ------------------------------ // CONSOLE -// TEST[catch:request] +// TEST[catch:bad_request] <1> Routing is required for `my_type` documents. <2> This index request throws a `routing_missing_exception`. diff --git a/docs/reference/mapping/params/coerce.asciidoc b/docs/reference/mapping/params/coerce.asciidoc index dacdabaafc030..d3e158185b6ba 100644 --- a/docs/reference/mapping/params/coerce.asciidoc +++ b/docs/reference/mapping/params/coerce.asciidoc @@ -45,7 +45,7 @@ PUT my_index/my_type/2 } -------------------------------------------------- // CONSOLE -// TEST[catch:request] +// TEST[catch:bad_request] <1> The `number_one` field will contain the integer `10`. <2> This document will be rejected because coercion is disabled. @@ -88,6 +88,6 @@ PUT my_index/my_type/2 { "number_two": "10" } <2> -------------------------------------------------- // CONSOLE -// TEST[catch:request] +// TEST[catch:bad_request] <1> The `number_one` field overrides the index level setting to enable coercion. <2> This document will be rejected because the `number_two` field inherits the index-level coercion setting. diff --git a/docs/reference/mapping/params/ignore-malformed.asciidoc b/docs/reference/mapping/params/ignore-malformed.asciidoc index 916b01b33c190..905a0f7d78a98 100644 --- a/docs/reference/mapping/params/ignore-malformed.asciidoc +++ b/docs/reference/mapping/params/ignore-malformed.asciidoc @@ -44,7 +44,7 @@ PUT my_index/my_type/2 } -------------------------------------------------- // CONSOLE -// TEST[catch:request] +// TEST[catch:bad_request] <1> This document will have the `text` field indexed, but not the `number_one` field. 
<2> This document will be rejected because `number_two` does not allow malformed values. diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index 0866dc5bc4dfd..cbb8f053cfbba 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -6,7 +6,7 @@ version: " - 5.99.99" reason: normalizer support in 6.0.0 - do: - catch: request + catch: bad_request indices.analyze: body: text: ABc diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/20_crud.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/20_crud.yml index b041e0664bb6c..0e348bbd7265d 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/20_crud.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/20_crud.yml @@ -142,7 +142,7 @@ teardown: --- "Test invalid processor config": - do: - catch: request + catch: bad_request ingest.put_pipeline: id: "my_pipeline" body: > diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/50_on_failure.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/50_on_failure.yml index 53c1a9a7923b1..4b40d9f670bfe 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/50_on_failure.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/50_on_failure.yml @@ -122,7 +122,7 @@ teardown: --- "Test pipeline with empty on_failure in a processor": - do: - catch: request + catch: bad_request ingest.put_pipeline: id: "my_pipeline" body: > @@ -155,7 +155,7 @@ teardown: --- "Test pipeline with empty on_failure in pipeline": - do: - catch: request + catch: bad_request ingest.put_pipeline: id: "my_pipeline" body: > diff --git 
a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/90_simulate.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/90_simulate.yml index 8b08535c12494..8b3ed313314bb 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/90_simulate.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/90_simulate.yml @@ -79,7 +79,7 @@ teardown: --- "Test simulate with provided invalid pipeline definition": - do: - catch: request + catch: bad_request ingest.simulate: body: > { @@ -183,7 +183,7 @@ teardown: --- "Test simulate with no provided pipeline or pipeline_id": - do: - catch: request + catch: bad_request ingest.simulate: body: > { @@ -206,7 +206,7 @@ teardown: --- "Test simulate with invalid processor config": - do: - catch: request + catch: bad_request ingest.simulate: body: > { diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/15_update.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/15_update.yml index a64ad904c4963..0e319be97bf0b 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/15_update.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/15_update.yml @@ -124,7 +124,7 @@ count: 1 - do: - catch: request + catch: bad_request update: index: test_1 type: test diff --git a/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/update_by_query/10_script.yml b/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/update_by_query/10_script.yml index c4414229d7efa..b43af1fc07e90 100644 --- a/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/update_by_query/10_script.yml +++ b/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/update_by_query/10_script.yml @@ -302,7 +302,7 @@ indices.refresh: {} - do: - catch: request + catch: bad_request update_by_query: refresh: true index: 
twitter diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index c822b665aebfc..c93873a5be429 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -163,6 +163,7 @@ be caught and tested. For instance: The argument to `catch` can be any of: [horizontal] +`bad_request`:: a 400 response from ES `unauthorized`:: a 401 response from ES `forbidden`:: a 403 response from ES `missing`:: a 404 response from ES diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yml index 0ae24068e60bf..3a05a9baa75fb 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yml @@ -103,7 +103,7 @@ index: index1 - do: - catch: request + catch: bad_request cat.segments: index: index1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/count/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/count/10_basic.yml index f38d2c315eb78..32256811e0f51 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/count/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/count/10_basic.yml @@ -58,7 +58,7 @@ setup: --- "count body without query element": - do: - catch: request + catch: bad_request count: index: test body: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yml index e220d98816161..afd5ea134fe64 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yml @@ -26,7 +26,7 
@@ reason: validation logic only fixed from 5.1.2 onwards - do: - catch: request + catch: bad_request create: index: test type: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml index e29690fe8d03b..ac1f1adcc94a7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml @@ -6,7 +6,7 @@ reason: validation logic only fixed from 5.1.2 onwards - do: - catch: request + catch: bad_request create: index: test type: test @@ -20,7 +20,7 @@ - match: { error.reason: "Validation Failed: 1: create operations only support internal versioning. use index instead;" } - do: - catch: request + catch: bad_request create: index: test type: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/30_parent.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/30_parent.yml index 91fdf027c131f..4c92605756a37 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/30_parent.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/30_parent.yml @@ -31,7 +31,7 @@ setup: "Parent omitted": - do: - catch: request + catch: bad_request exists: index: test_1 type: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yml index b5a9212d36b52..5f211435ae976 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yml @@ -56,7 +56,7 @@ setup: --- "Explain body without query element": - do: - catch: request + catch: bad_request explain: index: test_1 type: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/30_parent.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/get/30_parent.yml index 353dce8fab7da..04f578b88d6e6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/30_parent.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/30_parent.yml @@ -34,7 +34,7 @@ setup: --- "Parent omitted": - do: - catch: request + catch: bad_request get: index: test_1 type: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/30_parent.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/30_parent.yml index 8c1088e19bb39..fe589c9823472 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/30_parent.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/30_parent.yml @@ -32,7 +32,7 @@ setup: "Parent omitted": - do: - catch: request + catch: bad_request get_source: index: test_1 type: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml index 8ac55ec79f626..daac81849fb5e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml @@ -26,7 +26,7 @@ - match: { _source: { foo: bar }} - do: - catch: request + catch: bad_request index: index: idx type: type diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete/10_basic.yml index 40486da9e7e76..783e65001eff0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete/10_basic.yml @@ -14,7 +14,7 @@ setup: version: " - 5.99.0" reason: delete index doesn't support aliases only from 6.0.0 on - do: - catch: request + catch: bad_request indices.delete: index: alias - do: @@ -42,7 +42,7 @@ setup: version: " - 5.99.0" 
reason: delete index doesn't support aliases only from 6.0.0 on - do: - catch: request + catch: bad_request indices.delete: index: alias,index2 - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml index 86a3a441539ab..992d933326d58 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml @@ -20,7 +20,7 @@ index: test_index - do: - catch: request + catch: bad_request search: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml index 181e010c95c19..944338123d139 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml @@ -32,7 +32,7 @@ setup: index: _all - do: - catch: request + catch: bad_request search: index: test_index2 @@ -59,7 +59,7 @@ setup: index: test_* - do: - catch: request + catch: bad_request search: index: test_index2 @@ -86,7 +86,7 @@ setup: index: '*' - do: - catch: request + catch: bad_request search: index: test_index3 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml index 5527c023b13a3..32a5be627658b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml @@ -39,7 +39,7 @@ index: test_index - do: - catch: request + catch: bad_request indices.put_alias: index: test_index name: test_* @@ -55,7 +55,7 @@ index: foo - do: - catch: request + catch: bad_request 
indices.put_alias: index: test_index name: foo diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yml index 01bd7afc582b8..e2b15f3bde255 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yml @@ -100,7 +100,7 @@ - match: {test.settings: {index.number_of_shards: '1', index.number_of_replicas: '0'}} - do: - catch: request + catch: bad_request indices.put_template: name: test create: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.segments/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.segments/10_basic.yml index 3ad2a3683320e..64d94535a9cb5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.segments/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.segments/10_basic.yml @@ -66,7 +66,7 @@ index: index1 - do: - catch: request + catch: bad_request indices.segments: index: index1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yml index b7724e062836e..a0e131024b60f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yml @@ -106,7 +106,7 @@ setup: version: " - 5.0.99" reason: strict stats handling does not exist in 5.0 - do: - catch: request + catch: bad_request indices.stats: metric: [ fieldata ] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml index 62664319d8a43..07f32ff413211 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml @@ -27,7 +27,7 @@ version: " - 5.0.99" reason: strict stats handling does not exist in 5.0 - do: - catch: request + catch: bad_request nodes.stats: metric: [ transprot ] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/100_avg_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/100_avg_metric.yml index 2b5201d0410ce..a17bdade6560d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/100_avg_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/100_avg_metric.yml @@ -166,7 +166,7 @@ setup: "Aggregating wrong datatype test": - do: - catch: request + catch: bad_request search: body: aggs: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/110_max_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/110_max_metric.yml index abfbcbc394865..30b0bafe3b031 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/110_max_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/110_max_metric.yml @@ -166,7 +166,7 @@ setup: "Aggregating wrong datatype test": - do: - catch: request + catch: bad_request search: body: aggs: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/120_min_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/120_min_metric.yml index 5e1edaf845473..f56719dfe6e54 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/120_min_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/120_min_metric.yml @@ -166,7 +166,7 @@ setup: "Aggregating wrong datatype test": - do: - catch: request + catch: bad_request search: body: aggs: 
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/130_sum_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/130_sum_metric.yml index 1ce2c013863f4..9fbb15fdab3df 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/130_sum_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/130_sum_metric.yml @@ -166,7 +166,7 @@ setup: "Aggregating wrong datatype test": - do: - catch: request + catch: bad_request search: body: aggs: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml index fd967677311b8..1b985c668933f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml @@ -288,7 +288,7 @@ setup: percents: [] - do: - catch: request + catch: bad_request search: body: aggs: @@ -298,7 +298,7 @@ setup: percents: null - do: - catch: request + catch: bad_request search: body: aggs: @@ -308,7 +308,7 @@ setup: percents: ["foo"] - do: - catch: request + catch: bad_request search: body: aggs: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/190_percentiles_hdr_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/190_percentiles_hdr_metric.yml index d959eefed820b..1d527efbf8a29 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/190_percentiles_hdr_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/190_percentiles_hdr_metric.yml @@ -298,7 +298,7 @@ setup: number_of_significant_value_digits: 10 - do: - catch: request + catch: bad_request search: body: aggs: @@ -320,7 +320,7 @@ 
setup: percents: [] - do: - catch: request + catch: bad_request search: body: aggs: @@ -331,7 +331,7 @@ setup: percents: null - do: - catch: request + catch: bad_request search: body: aggs: @@ -342,7 +342,7 @@ setup: percents: ["foo"] - do: - catch: request + catch: bad_request search: body: aggs: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml index 5cdde2cb6965d..52fbd19185335 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml @@ -69,7 +69,7 @@ setup: "Search body without query element": - do: - catch: request + catch: bad_request search: body: match: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/issue4895.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/issue4895.yml index 993cbed26475d..96a2ca4854a18 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/issue4895.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/issue4895.yml @@ -22,7 +22,7 @@ setup: "Test with _local preference placed in query body - should fail": - do: - catch: request + catch: bad_request search: index: test type: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/issue9606.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/issue9606.yml index 5421ae56a9ee3..3e46531c034eb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/issue9606.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/issue9606.yml @@ -19,7 +19,7 @@ setup: "Test search_type=query_and_fetch not supported from REST layer": - do: - catch: request + catch: bad_request search: index: test type: test @@ -33,7 +33,7 @@ setup: "Test search_type=dfs_query_and_fetch not supported from REST layer": - do: - catch: request + catch: 
bad_request search: index: test type: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/50_parent.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/50_parent.yml index 82508f951e04c..e65f80d705cb2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/50_parent.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/50_parent.yml @@ -65,7 +65,7 @@ setup: body: { foo: bar } - do: - catch: request + catch: bad_request update: index: test_1 type: test diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index b906090d08fd0..d509b6685a290 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -320,6 +320,7 @@ private String formatStatusCodeMessage(ClientYamlTestResponse restTestResponse, private static Map>> catches = new HashMap<>(); static { + catches.put("bad_request", tuple("400", equalTo(400))); catches.put("unauthorized", tuple("401", equalTo(401))); catches.put("forbidden", tuple("403", equalTo(403))); catches.put("missing", tuple("404", equalTo(404))); @@ -327,6 +328,7 @@ private String formatStatusCodeMessage(ClientYamlTestResponse restTestResponse, catches.put("conflict", tuple("409", equalTo(409))); catches.put("unavailable", tuple("503", equalTo(503))); catches.put("request", tuple("4xx|5xx", allOf(greaterThanOrEqualTo(400), + not(equalTo(400)), not(equalTo(401)), not(equalTo(403)), not(equalTo(404)), From ffc9999567bceceac2d5df43d49cbd5ae614dc37 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 14 Sep 2017 23:19:26 +0300 Subject: [PATCH 37/67] fix StartRecoveryRequestTests.testSerialization --- .../indices/recovery/StartRecoveryRequestTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java index 14799687d232b..bb1aac89f3e8f 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java @@ -51,7 +51,7 @@ public void testSerialization() throws Exception { UUIDs.randomBase64UUID(), new DiscoveryNode("a", buildNewFakeTransportAddress(), emptyMap(), emptySet(), targetNodeVersion), new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), targetNodeVersion), - Store.MetadataSnapshot.EMPTY, + metadataSnapshot, randomBoolean(), randomNonNegativeLong(), randomBoolean() || metadataSnapshot.getHistoryUUID() == null ? From a99803f89ab308a90e1aca412462d3c4bee1d3a0 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 14 Sep 2017 21:58:24 +0300 Subject: [PATCH 38/67] enable bwc testing --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 7b1e517a8586b..cfc8401a934e0 100644 --- a/build.gradle +++ b/build.gradle @@ -186,7 +186,7 @@ task verifyVersions { * after the backport of the backcompat code is complete. 
*/ allprojects { - ext.bwc_tests_enabled = false + ext.bwc_tests_enabled = true } task verifyBwcTestsEnabled { From 120ddd99c34f0e447d5688324b51934184e9c88f Mon Sep 17 00:00:00 2001 From: lcawley Date: Thu, 14 Sep 2017 16:18:29 -0700 Subject: [PATCH 39/67] [DOCS] Remove edit link from ML node --- docs/reference/modules.asciidoc | 2 +- docs/reference/modules/node.asciidoc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/reference/modules.asciidoc b/docs/reference/modules.asciidoc index ec6bd4593f5d0..548f29e57a5cd 100644 --- a/docs/reference/modules.asciidoc +++ b/docs/reference/modules.asciidoc @@ -99,6 +99,7 @@ include::modules/network.asciidoc[] include::modules/node.asciidoc[] +:edit_url: include::modules/plugins.asciidoc[] include::modules/scripting.asciidoc[] @@ -112,4 +113,3 @@ include::modules/transport.asciidoc[] include::modules/tribe.asciidoc[] include::modules/cross-cluster-search.asciidoc[] - diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index 4b8f8f4a02581..b9ff8dc407e5b 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -325,5 +325,6 @@ the <>, the <> and the <>. ifdef::include-xpack[] +:edit_url!: include::{xes-repo-dir}/node.asciidoc[] endif::include-xpack[] From e9deb625467e0cc8c56c331676f3218933789ffd Mon Sep 17 00:00:00 2001 From: Itamar Syn-Hershko Date: Fri, 15 Sep 2017 04:10:12 +0300 Subject: [PATCH 40/67] Better message text for ResponseException This avoids messages with malformed URLs, like "org.elasticsearch.client.ResponseException: PUT http://127.0.0.1:9502customer: HTTP/1.1 400 Bad Request". 
Relates #26564 --- .../org/elasticsearch/client/ResponseException.java | 10 ++++++++-- .../elasticsearch/client/ResponseExceptionTests.java | 11 +++++++++-- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java b/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java index 2f22bfd50f472..072e45ffb0e97 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java +++ b/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java @@ -24,6 +24,7 @@ import org.apache.http.util.EntityUtils; import java.io.IOException; +import java.util.Locale; /** * Exception thrown when an elasticsearch node responds to a request with a status code that indicates an error. @@ -39,8 +40,13 @@ public ResponseException(Response response) throws IOException { } private static String buildMessage(Response response) throws IOException { - String message = response.getRequestLine().getMethod() + " " + response.getHost() + response.getRequestLine().getUri() - + ": " + response.getStatusLine().toString(); + String message = String.format(Locale.ROOT, + "method [%s], host [%s], URI [%s], status line [%s]", + response.getRequestLine().getMethod(), + response.getHost(), + response.getRequestLine().getUri(), + response.getStatusLine().toString() + ); HttpEntity entity = response.getEntity(); if (entity != null) { diff --git a/client/rest/src/test/java/org/elasticsearch/client/ResponseExceptionTests.java b/client/rest/src/test/java/org/elasticsearch/client/ResponseExceptionTests.java index 1638693a44f5e..6cf7e68b98800 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/ResponseExceptionTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/ResponseExceptionTests.java @@ -36,6 +36,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.Locale; import static 
org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; @@ -74,8 +75,14 @@ public void testResponseException() throws IOException { assertNull(responseException.getResponse().getEntity()); } - String message = response.getRequestLine().getMethod() + " " + response.getHost() + response.getRequestLine().getUri() - + ": " + response.getStatusLine().toString(); + String message = String.format(Locale.ROOT, + "method [%s], host [%s], URI [%s], status line [%s]", + response.getRequestLine().getMethod(), + response.getHost(), + response.getRequestLine().getUri(), + response.getStatusLine().toString() + ); + if (hasBody) { message += "\n" + responseBody; } From 3d5f70790a1ba9a0488513f3d79610595abe8469 Mon Sep 17 00:00:00 2001 From: markwalkom Date: Fri, 15 Sep 2017 19:11:55 +1000 Subject: [PATCH 41/67] [Docs] Update ingest.asciidoc (#26599) Added a brief note to clarify where configured pipelines are stored (cluster state). --- docs/reference/ingest.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index 95ca46b421017..463e47dfde950 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -18,7 +18,8 @@ node.ingest: false To pre-process documents before indexing, you <> that specifies a series of <>. Each processor transforms the document in some way. For example, you may have a pipeline that consists of one processor that removes a field from -the document followed by another processor that renames a field. +the document followed by another processor that renames a field. Configured pipelines are then stored +in the <>. To use a pipeline, you simply specify the `pipeline` parameter on an index or bulk request to tell the ingest node which pipeline to use. 
For example: From 7f74a620a1b58737e09cc1718153611e6efb8eb2 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 15 Sep 2017 11:23:57 +0200 Subject: [PATCH 42/67] [Docs] Add description for missing fields in Reindex/Update/Delete By Query (#26618) This commit adds some missing description for some fields in the Reindex/UBQ/DBQ responses. --- docs/reference/docs/delete-by-query.asciidoc | 31 ++++++++++++++++++-- docs/reference/docs/reindex.asciidoc | 31 +++++++++++++++++++- docs/reference/docs/update-by-query.asciidoc | 31 +++++++++++++++++++- 3 files changed, 89 insertions(+), 4 deletions(-) diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index b15604a468eca..0aea249d899e4 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -212,6 +212,7 @@ The JSON response looks like this: { "took" : 147, "timed_out": false, + "total": 119, "deleted": 119, "batches": 1, "version_conflicts": 0, @@ -223,7 +224,6 @@ The JSON response looks like this: "throttled_millis": 0, "requests_per_second": -1.0, "throttled_until_millis": 0, - "total": 119, "failures" : [ ] } -------------------------------------------------- @@ -233,6 +233,15 @@ The JSON response looks like this: The number of milliseconds from start to end of the whole operation. +`timed_out`:: + +This flag is set to `true` if any of the requests executed during the +delete by query execution has timed out. + +`total`:: + +The number of documents that were successfully processed. + `deleted`:: The number of documents that were successfully deleted. @@ -245,14 +254,32 @@ The number of scroll responses pulled back by the delete by query. The number of version conflicts that the delete by query hit. +`noops`:: + +This field is always equal to zero for delete by query. It only exists +so that delete by query, update by query and reindex APIs return responses + with the same structure. 
+ `retries`:: -The number of retries that the delete by query did in response to a full queue. +The number of retries attempted by delete by query. `bulk` is the number +of bulk actions retried and `search` is the number of search actions retried. `throttled_millis`:: Number of milliseconds the request slept to conform to `requests_per_second`. +`requests_per_second`:: + +The number of requests per second effectively executed during the delete by query. + +`throttled_until_millis`:: + +This field should always be equal to zero in a delete by query response. It only +has meaning when using the <>, where it +indicates the next time (in milliseconds since epoch) a throttled request will be +executed again in order to conform to `requests_per_second`. + `failures`:: Array of all indexing failures. If this is non-empty then the request aborted diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 8ebf7002a891b..e1876327504e2 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -585,7 +585,7 @@ The JSON response looks like this: "timed_out": false, "total": 5, "updated": 0, - "created": 123, + "created": 5, "deleted": 0, "batches": 1, "noops": 0, @@ -606,6 +606,15 @@ The JSON response looks like this: The number of milliseconds from start to end of the whole operation. +`timed_out`:: + +This flag is set to `true` if any of the requests executed during the +reindex has timed out. + +`total`:: + +The number of documents that were successfully processed. + `updated`:: The number of documents that were successfully updated. @@ -614,10 +623,19 @@ The number of documents that were successfully updated. The number of documents that were successfully created. +`deleted`:: + +The number of documents that were successfully deleted. + `batches`:: The number of scroll responses pulled back by the reindex. 
+`noops`:: + +The number of documents that were ignored because the script used for +the reindex returned a `noop` value for `ctx.op`. + `version_conflicts`:: The number of version conflicts that reindex hit. @@ -631,6 +649,17 @@ actions retried and `search` is the number of search actions retried. Number of milliseconds the request slept to conform to `requests_per_second`. +`requests_per_second`:: + +The number of requests per second effectively executed during the reindex. + +`throttled_until_millis`:: + +This field should always be equal to zero in a reindex response. It only +has meaning when using the <>, where it +indicates the next time (in milliseconds since epoch) a throttled request will be +executed again in order to conform to `requests_per_second`. + `failures`:: Array of all indexing failures. If this is non-empty then the request aborted diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index 326bbc216adee..2aee8faadcfd5 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -262,6 +262,7 @@ The JSON response looks like this: { "took" : 147, "timed_out": false, + "total": 5, "updated": 5, "deleted": 0, "batches": 1, @@ -274,7 +275,6 @@ "throttled_millis": 0, "requests_per_second": -1.0, "throttled_until_millis": 0, - "total": 5, "failures" : [ ] } -------------------------------------------------- @@ -284,10 +284,23 @@ The JSON response looks like this: The number of milliseconds from start to end of the whole operation. +`timed_out`:: + +This flag is set to `true` if any of the requests executed during the +update by query execution has timed out. + +`total`:: + +The number of documents that were successfully processed. + `updated`:: The number of documents that were successfully updated. +`deleted`:: + +The number of documents that were successfully deleted. 
+ `batches`:: The number of scroll responses pulled back by the update by query. @@ -296,6 +309,11 @@ The number of version conflicts that the update by query hit. +`noops`:: + +The number of documents that were ignored because the script used for +the update by query returned a `noop` value for `ctx.op`. + `retries`:: The number of retries attempted by update-by-query. `bulk` is the number of bulk @@ -305,6 +323,17 @@ actions retried and `search` is the number of search actions retried. Number of milliseconds the request slept to conform to `requests_per_second`. +`requests_per_second`:: + +The number of requests per second effectively executed during the update by query. + +`throttled_until_millis`:: + +This field should always be equal to zero in an update by query response. It only +has meaning when using the <>, where it +indicates the next time (in milliseconds since epoch) a throttled request will be +executed again in order to conform to `requests_per_second`. + `failures`:: Array of all indexing failures. If this is non-empty then the request aborted From 7184cf8b5b34f4f25a92f35bf690ce3faf4baa70 Mon Sep 17 00:00:00 2001 From: Claudio Bley Date: Fri, 15 Sep 2017 12:25:09 +0200 Subject: [PATCH 43/67] Fix kuromoji default stoptags (#26600) Initialize the default stop-tags in `KuromojiPartOfSpeechFilterFactory` if the `stoptags` are not given in the config. Also adding a test which checks that part-of-speech tokens are removed when using the kuromoji_part_of_speech filter. 
--- .../KuromojiPartOfSpeechFilterFactory.java | 3 +++ .../index/analysis/KuromojiAnalysisTests.java | 17 ++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiPartOfSpeechFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiPartOfSpeechFilterFactory.java index e3a58360e9b5f..bea12470cb026 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiPartOfSpeechFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiPartOfSpeechFilterFactory.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.ja.JapaneseAnalyzer; import org.apache.lucene.analysis.ja.JapanesePartOfSpeechStopFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -38,6 +39,8 @@ public KuromojiPartOfSpeechFilterFactory(IndexSettings indexSettings, Environmen List wordList = Analysis.getWordList(env, settings, "stoptags"); if (wordList != null) { stopTags.addAll(wordList); + } else { + stopTags.addAll(JapaneseAnalyzer.getDefaultStopTags()); } } diff --git a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java index c0271c997849f..1be8a22fb2aaf 100644 --- a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java +++ b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java @@ -93,6 +93,21 @@ public void testBaseFormFilterFactory() throws IOException { assertSimpleTSOutput(tokenFilter.create(tokenizer), expected); } + public void testPartOfSpeechFilter() throws IOException { + TestAnalysis 
analysis = createTestAnalysis(); + TokenFilterFactory tokenFilter = analysis.tokenFilter.get("kuromoji_part_of_speech"); + + assertThat(tokenFilter, instanceOf(KuromojiPartOfSpeechFilterFactory.class)); + + String source = "寿司がおいしいね"; + String[] expected_tokens = new String[]{"寿司", "おいしい"}; + + Tokenizer tokenizer = new JapaneseTokenizer(null, true, JapaneseTokenizer.Mode.SEARCH); + tokenizer.setReader(new StringReader(source)); + + assertSimpleTSOutput(tokenFilter.create(tokenizer), expected_tokens); + } + public void testReadingFormFilterFactory() throws IOException { TestAnalysis analysis = createTestAnalysis(); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("kuromoji_rf"); @@ -208,7 +223,7 @@ public static void assertSimpleTSOutput(TokenStream stream, int i = 0; while (stream.incrementToken()) { assertThat(expected.length, greaterThan(i)); - assertThat( "expected different term at index " + i, expected[i++], equalTo(termAttr.toString())); + assertThat("expected different term at index " + i, termAttr.toString(), equalTo(expected[i++])); } assertThat("not all tokens produced", i, equalTo(expected.length)); } From 0f2a11695e9c8c9ebf156f421ccd078d0bbe609b Mon Sep 17 00:00:00 2001 From: kel Date: Fri, 15 Sep 2017 20:01:35 +0800 Subject: [PATCH 44/67] Filter unsupported relation for range query builder (#26620) --- .../index/query/RangeQueryBuilder.java | 13 ++++++++++ .../index/query/RangeQueryBuilderTests.java | 26 +++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index 0d4ded10d2cbb..35350a8480ec7 100644 --- a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -115,10 +115,20 @@ public RangeQueryBuilder(StreamInput in) throws IOException { String relationString = in.readOptionalString(); if 
(relationString != null) { relation = ShapeRelation.getRelationByName(relationString); + if (relation != null && !isRelationAllowed(relation)) { + throw new IllegalArgumentException( + "[range] query does not support relation [" + relationString + "]"); + } } } } + private boolean isRelationAllowed(ShapeRelation relation) { + return relation == ShapeRelation.INTERSECTS + || relation == ShapeRelation.CONTAINS + || relation == ShapeRelation.WITHIN; + } + @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(this.fieldName); @@ -317,6 +327,9 @@ public RangeQueryBuilder relation(String relation) { if (this.relation == null) { throw new IllegalArgumentException(relation + " is not a valid relation"); } + if (!isRelationAllowed(this.relation)) { + throw new IllegalArgumentException("[range] query does not support relation [" + relation + "]"); + } return this; } diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index a3f90498b52a2..67c3e67d39e84 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -31,6 +31,7 @@ import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; @@ -535,4 +536,29 @@ public void testParseFailsWithMultipleFieldsWhenOneIsDate() { ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json)); assertEquals("[range] query doesn't support multiple fields, found [age] and [" + DATE_FIELD_NAME + "]", e.getMessage()); } + + public void testParseRelation() { + 
String json = + "{\n" + + " \"range\": {\n" + + " \"age\": {\n" + + " \"gte\": 30,\n" + + " \"lte\": 40,\n" + + " \"relation\": \"disjoint\"\n" + + " }" + + " }\n" + + " }"; + String fieldName = randomAlphaOfLengthBetween(1, 20); + IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> parseQuery(json)); + assertEquals("[range] query does not support relation [disjoint]", e1.getMessage()); + RangeQueryBuilder builder = new RangeQueryBuilder(fieldName); + IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, ()->builder.relation("disjoint")); + assertEquals("[range] query does not support relation [disjoint]", e2.getMessage()); + builder.relation("contains"); + assertEquals(ShapeRelation.CONTAINS, builder.relation()); + builder.relation("within"); + assertEquals(ShapeRelation.WITHIN, builder.relation()); + builder.relation("intersects"); + assertEquals(ShapeRelation.INTERSECTS, builder.relation()); + } } From b789ce737b405236caa97a8905ffde6c3f63b2ff Mon Sep 17 00:00:00 2001 From: Dimitrios Liappis Date: Fri, 15 Sep 2017 16:14:47 +0300 Subject: [PATCH 45/67] Docs: Use single-node discovery.type for dev example For the single node, dev example, the `discovery.type=single-node`[1],[2] is a perfect fit and makes the example shorter and more self explanatory. Also expose the transport port, to help with dev use-cases using the transport client. 
[1] https://github.com/elastic/elasticsearch/pull/23595 [2] https://github.com/elastic/elasticsearch/pull/23598 Relates #26289 --- docs/reference/setup/install/docker.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 3090849f9d870..017aae350033d 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -49,7 +49,7 @@ Elasticsearch can be quickly started for development or testing use with the fol ["source","sh",subs="attributes"] -------------------------------------------- -docker run -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" {docker-image} +docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" {docker-image} -------------------------------------------- endif::[] From 296c2396114a07d67ef4ee951eff1d2d41e93f5f Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Fri, 15 Sep 2017 17:00:41 -0500 Subject: [PATCH 46/67] Add check for invalid index in WildcardExpressionResolver (#26409) This commit adds validation to the resolving of indexes in the wildcard expression resolver. It no longer throws a 404 Not Found when resolving invalid indices. It throws a 400 instead, as it is an invalid index. This was the behavior of 5.x. 
--- .../metadata/IndexNameExpressionResolver.java | 12 ++++++++++++ .../metadata/IndexNameExpressionResolverTests.java | 13 ++++++++++++- .../rest-api-spec/test/indices.get/10_basic.yml | 8 ++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 17b43efdf9d85..6dc92a44bb08b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndexClosedException; +import org.elasticsearch.indices.InvalidIndexNameException; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; @@ -601,6 +602,7 @@ private Set innerResolve(Context context, List expressions, Indi if (Strings.isEmpty(expression)) { throw indexNotFoundException(expression); } + validateAliasOrIndex(expression); if (aliasOrIndexExists(options, metaData, expression)) { if (result != null) { result.add(expression); @@ -654,6 +656,16 @@ private Set innerResolve(Context context, List expressions, Indi return result; } + private static void validateAliasOrIndex(String expression) { + // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API + // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, + // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown + // if the expression can't be found. 
+ if (expression.charAt(0) == '_') { + throw new InvalidIndexNameException(expression, "must not start with '_'."); + } + } + private static boolean aliasOrIndexExists(IndicesOptions options, MetaData metaData, String expression) { AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(expression); //treat aliases as unavailable indices when ignoreAliases is set to true (e.g. delete index and update aliases api) diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index 5e04714552248..0530bd617af63 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndexClosedException; +import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; @@ -641,7 +642,7 @@ public void testConcreteIndicesWildcardAndAliases() { // when ignoreAliases option is set, concreteIndexNames resolves the provided expressions // only against the defined indices IndicesOptions ignoreAliasesOptions = IndicesOptions.fromOptions(false, false, true, false, true, false, true); - + String[] indexNamesIndexWildcard = indexNameExpressionResolver.concreteIndexNames(state, ignoreAliasesOptions, "foo*"); assertEquals(1, indexNamesIndexWildcard.length); @@ -1126,4 +1127,14 @@ public void testIndicesAliasesRequestIgnoresAliases() { assertEquals("test-index", indices[0]); } } + + public void testInvalidIndex() { + MetaData.Builder mdBuilder = MetaData.builder().put(indexBuilder("test")); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + 
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + + InvalidIndexNameException iine = expectThrows(InvalidIndexNameException.class, + () -> indexNameExpressionResolver.concreteIndexNames(context, "_foo")); + assertEquals("Invalid index name [_foo], must not start with '_'.", iine.getMessage()); + } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml index b6ac97eb91bfd..943cbcf65d144 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml @@ -160,3 +160,11 @@ setup: - is_true: test_index_2.settings - is_true: test_index_3.settings +--- +"Should return an exception when querying invalid indices": + + - do: + catch: bad_request + indices.get: + index: _foo + From 0814ea3200d6af6c142ad7d2fa16bce5acfa9665 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Sat, 16 Sep 2017 10:43:48 +0300 Subject: [PATCH 47/67] fix testSniffNodes to use the new error message relates to #26564 --- .../client/sniff/ElasticsearchHostsSnifferTests.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java index 5221b205dd4f3..483b7df62f95a 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java @@ -55,6 +55,7 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.startsWith; import static org.junit.Assert.assertEquals; import static 
org.junit.Assert.assertThat; import static org.junit.Assert.fail; @@ -128,7 +129,9 @@ public void testSniffNodes() throws IOException { } catch(ResponseException e) { Response response = e.getResponse(); if (sniffResponse.isFailure) { - assertThat(e.getMessage(), containsString("GET " + httpHost + "/_nodes/http?timeout=" + sniffRequestTimeout + "ms")); + final String errorPrefix = "method [GET], host [" + httpHost + "], URI [/_nodes/http?timeout=" + sniffRequestTimeout + + "ms], status line [HTTP/1.1"; + assertThat(e.getMessage(), startsWith(errorPrefix)); assertThat(e.getMessage(), containsString(Integer.toString(sniffResponse.nodesInfoResponseCode))); assertThat(response.getHost(), equalTo(httpHost)); assertThat(response.getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode)); From bdd9953aa4f677862777a1684e1ee56abc8e6598 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 16 Sep 2017 11:00:33 -0400 Subject: [PATCH 48/67] Fix discovery-file plugin to use custom config path The discovery-file plugin was not config path aware, so it always picked up the default config path (from Elasticsearch home) rather than a custom config path. This commit fixes the discovery-file plugin to respect a custom config path. 
Relates #26662 --- .../file/FileBasedDiscoveryPlugin.java | 8 ++++++-- .../file/FileBasedUnicastHostsProvider.java | 6 +++--- .../file/FileBasedDiscoveryPluginTests.java | 6 ++++-- .../FileBasedUnicastHostsProviderTests.java | 18 +++++++++++++----- 4 files changed, 26 insertions(+), 12 deletions(-) diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java index 0cd8176df83f5..2cda88f796ed9 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java @@ -42,6 +42,7 @@ import org.elasticsearch.watcher.ResourceWatcherService; import java.io.IOException; +import java.nio.file.Path; import java.util.Collection; import java.util.Collections; import java.util.Map; @@ -61,10 +62,12 @@ public class FileBasedDiscoveryPlugin extends Plugin implements DiscoveryPlugin private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); private final Settings settings; + private final Path configPath; private ExecutorService fileBasedDiscoveryExecutorService; - public FileBasedDiscoveryPlugin(Settings settings) { + public FileBasedDiscoveryPlugin(Settings settings, Path configPath) { this.settings = settings; + this.configPath = configPath; } @Override @@ -96,7 +99,8 @@ public Map> getZenHostsProviders(Transpor NetworkService networkService) { return Collections.singletonMap( "file", - () -> new FileBasedUnicastHostsProvider(settings, transportService, fileBasedDiscoveryExecutorService)); + () -> new FileBasedUnicastHostsProvider( + new Environment(settings, configPath), transportService, fileBasedDiscoveryExecutorService)); } @Override diff --git 
a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java index 196e98d658217..ee5f6c08b91ce 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java @@ -71,11 +71,11 @@ class FileBasedUnicastHostsProvider extends AbstractComponent implements Unicast private final TimeValue resolveTimeout; - FileBasedUnicastHostsProvider(Settings settings, TransportService transportService, ExecutorService executorService) { - super(settings); + FileBasedUnicastHostsProvider(Environment environment, TransportService transportService, ExecutorService executorService) { + super(environment.settings()); this.transportService = transportService; this.executorService = executorService; - this.unicastHostsFilePath = new Environment(settings).configFile().resolve("discovery-file").resolve(UNICAST_HOSTS_FILE); + this.unicastHostsFilePath = environment.configFile().resolve("discovery-file").resolve(UNICAST_HOSTS_FILE); this.resolveTimeout = DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.get(settings); } diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginTests.java index 7a7ee9dbd037e..838d53d2d6221 100644 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginTests.java +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginTests.java @@ -24,11 +24,12 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.nio.file.Path; public class FileBasedDiscoveryPluginTests extends ESTestCase { 
public void testHostsProviderBwc() { - FileBasedDiscoveryPlugin plugin = new FileBasedDiscoveryPlugin(Settings.EMPTY); + FileBasedDiscoveryPlugin plugin = new FileBasedDiscoveryPlugin(Settings.EMPTY, createTempDir()); Settings additionalSettings = plugin.additionalSettings(); assertEquals("file", additionalSettings.get(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey())); assertWarnings("Using discovery.type setting to set hosts provider is deprecated. " + @@ -37,9 +38,10 @@ public void testHostsProviderBwc() { public void testHostsProviderExplicit() { Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "foo").build(); - FileBasedDiscoveryPlugin plugin = new FileBasedDiscoveryPlugin(settings); + FileBasedDiscoveryPlugin plugin = new FileBasedDiscoveryPlugin(settings, createTempDir()); assertEquals(Settings.EMPTY, plugin.additionalSettings()); assertWarnings("Using discovery.type setting to set hosts provider is deprecated. " + "Set \"discovery.zen.hosts_provider: file\" instead"); } + } diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java index 4395d16db377a..db56f9c2f8341 100644 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java @@ -126,7 +126,8 @@ public void testUnicastHostsDoesNotExist() throws Exception { final Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .build(); - final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(settings, transportService, executorService); + final Environment environment = new Environment(settings); + final FileBasedUnicastHostsProvider 
provider = new FileBasedUnicastHostsProvider(environment, transportService, executorService); final List nodes = provider.buildDynamicNodes(); assertEquals(0, nodes.size()); } @@ -152,13 +153,20 @@ private List setupAndRunHostProvider(final List hostEntri final Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), homeDir) .build(); - final Path configDir = homeDir.resolve("config").resolve("discovery-file"); - Files.createDirectories(configDir); - final Path unicastHostsPath = configDir.resolve(UNICAST_HOSTS_FILE); + final Path configPath; + if (randomBoolean()) { + configPath = homeDir.resolve("config"); + } else { + configPath = createTempDir(); + } + final Path discoveryFilePath = configPath.resolve("discovery-file"); + Files.createDirectories(discoveryFilePath); + final Path unicastHostsPath = discoveryFilePath.resolve(UNICAST_HOSTS_FILE); try (BufferedWriter writer = Files.newBufferedWriter(unicastHostsPath)) { writer.write(String.join("\n", hostEntries)); } - return new FileBasedUnicastHostsProvider(settings, transportService, executorService).buildDynamicNodes(); + return new FileBasedUnicastHostsProvider( + new Environment(settings, configPath), transportService, executorService).buildDynamicNodes(); } } From 1f9e0fd0dd7acd68616eab16eabc6bc0c65041db Mon Sep 17 00:00:00 2001 From: Peter Dyson Date: Mon, 18 Sep 2017 16:56:19 +1000 Subject: [PATCH 49/67] [Docs] improved description for fs.total.available_in_bytes (#26657) --- docs/reference/cluster/nodes-stats.asciidoc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 793a056b3ebb9..4d748d31559ba 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -96,7 +96,9 @@ information that concern the file system: Total number of unallocated bytes in all file stores `fs.total.available_in_bytes`:: - Total number of 
bytes available to this Java virtual machine on all file stores + Total number of bytes available to this Java virtual machine on all file stores. + Depending on OS or process level restrictions, this might appear less than `fs.total.free_in_bytes`. + This is the actual amount of free disk space the Elasticsearch node can utilise. `fs.data`:: List of all file stores @@ -360,4 +362,4 @@ The `ingest` flag can be set to retrieve statistics that concern ingest: `ingest.total.failed`:: The total number ingest preprocessing operations failed during the lifetime of this node -On top of these overall ingest statistics, these statistics are also provided on a per pipeline basis. \ No newline at end of file +On top of these overall ingest statistics, these statistics are also provided on a per pipeline basis. From c16c653c3e9ad07ddd90d2d78ef8337cc5294750 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 18 Sep 2017 10:46:02 +0200 Subject: [PATCH 50/67] [Test] Fix reference/cat/allocation/line_8 test failure In this test, 260b is replaced by the regexp \d+b but the test sometimes produces results like 1.1kb so this commit adapts the regexp to match values with decimals --- docs/reference/cat/allocation.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index ba702080e581a..3719758ff58e9 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -18,7 +18,7 @@ Might respond with: shards disk.indices disk.used disk.avail disk.total disk.percent host ip node 5 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 -------------------------------------------------- -// TESTRESPONSE[s/260b/\\d+b/ s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] +// TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] // TESTRESPONSE[s/CSUXak2/.+/ _cat] Here we can see that each node has been allocated a single shard and 
From c238b79cf4f5ef4f71bf919a6676fb6c833f6fca Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 18 Sep 2017 06:04:44 -0400 Subject: [PATCH 51/67] Add global checkpoint tracking on the primary This commit adds local tracking of the global checkpoints on all shard copies when a global checkpoint tracker is operating in primary mode. With this, we relay the global checkpoint on a shard copy back to the primary shard during replication operations. This serves as another step towards adding a background sync of the global checkpoint to the shard copies. Relates #26666 --- build.gradle | 2 +- .../TransportResyncReplicationAction.java | 3 +- .../replication/ReplicationOperation.java | 26 +- .../TransportReplicationAction.java | 37 +- .../seqno/GlobalCheckpointSyncAction.java | 3 +- .../index/seqno/GlobalCheckpointTracker.java | 355 ++++++++++++------ .../index/seqno/SequenceNumbersService.java | 19 +- .../elasticsearch/index/shard/IndexShard.java | 27 +- .../recovery/RecoverySourceHandler.java | 4 +- .../ReplicationOperationTests.java | 28 +- .../TransportReplicationActionTests.java | 3 +- .../TransportWriteActionTests.java | 3 +- .../index/engine/InternalEngineTests.java | 5 +- .../ESIndexLevelReplicationTestCase.java | 7 +- .../RecoveryDuringReplicationTests.java | 1 - .../seqno/GlobalCheckpointTrackerTests.java | 215 +++++++---- .../elasticsearch/recovery/RelocationIT.java | 16 +- 17 files changed, 544 insertions(+), 210 deletions(-) diff --git a/build.gradle b/build.gradle index cfc8401a934e0..7b1e517a8586b 100644 --- a/build.gradle +++ b/build.gradle @@ -186,7 +186,7 @@ task verifyVersions { * after the backport of the backcompat code is complete. 
*/ allprojects { - ext.bwc_tests_enabled = true + ext.bwc_tests_enabled = false } task verifyBwcTestsEnabled { diff --git a/core/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/core/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index 5e00f78ec54a9..514cbca04cc7c 100644 --- a/core/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -93,7 +93,8 @@ protected void sendReplicaRequest( if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { super.sendReplicaRequest(replicaRequest, node, listener); } else { - listener.onResponse(new ReplicaResponse(SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT)); + final long pre60NodeCheckpoint = SequenceNumbersService.PRE_60_NODE_CHECKPOINT; + listener.onResponse(new ReplicaResponse(pre60NodeCheckpoint, pre60NodeCheckpoint)); } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 1b4fd20140dad..6fa06c25457b0 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.ReplicationGroup; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -173,6 +174,7 @@ public void onResponse(ReplicaResponse response) { successfulShards.incrementAndGet(); try { primary.updateLocalCheckpointForShard(shard.allocationId().getId(), response.localCheckpoint()); + 
primary.updateGlobalCheckpointForShard(shard.allocationId().getId(), response.globalCheckpoint()); } catch (final AlreadyClosedException e) { // okay, the index was deleted or this shard was never activated after a relocation; fall through and finish normally } catch (final Exception e) { @@ -315,6 +317,14 @@ public interface Primary< */ void updateLocalCheckpointForShard(String allocationId, long checkpoint); + /** + * Update the local knowledge of the global checkpoint for the specified allocation ID. + * + * @param allocationId the allocation ID to update the global checkpoint for + * @param globalCheckpoint the global checkpoint + */ + void updateGlobalCheckpointForShard(String allocationId, long globalCheckpoint); + /** * Returns the local checkpoint on the primary shard. * @@ -385,12 +395,24 @@ void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, Runnable } /** - * An interface to encapsulate the metadata needed from replica shards when they respond to operations performed on them + * An interface to encapsulate the metadata needed from replica shards when they respond to operations performed on them. */ public interface ReplicaResponse { - /** the local check point for the shard. see {@link org.elasticsearch.index.seqno.SequenceNumbersService#getLocalCheckpoint()} */ + /** + * The local checkpoint for the shard. See {@link SequenceNumbersService#getLocalCheckpoint()}. + * + * @return the local checkpoint + **/ long localCheckpoint(); + + /** + * The global checkpoint for the shard. See {@link SequenceNumbersService#getGlobalCheckpoint()}. 
+ * + * @return the global checkpoint + **/ + long globalCheckpoint(); + } public static class RetryOnPrimaryException extends ElasticsearchException { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 1a96a159179c4..15fc8075fa108 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -531,7 +531,8 @@ public void onResponse(Releasable releasable) { try { final ReplicaResult replicaResult = shardOperationOnReplica(request, replica); releasable.close(); // release shard operation lock before responding to caller - final TransportReplicationAction.ReplicaResponse response = new ReplicaResponse(replica.getLocalCheckpoint()); + final TransportReplicationAction.ReplicaResponse response = + new ReplicaResponse(replica.getLocalCheckpoint(), replica.getGlobalCheckpoint()); replicaResult.respond(new ResponseListener(response)); } catch (final Exception e) { Releasables.closeWhileHandlingException(releasable); // release shard operation lock before responding to caller @@ -1006,6 +1007,11 @@ public void updateLocalCheckpointForShard(String allocationId, long checkpoint) indexShard.updateLocalCheckpointForShard(allocationId, checkpoint); } + @Override + public void updateGlobalCheckpointForShard(final String allocationId, final long globalCheckpoint) { + indexShard.updateGlobalCheckpointForShard(allocationId, globalCheckpoint); + } + @Override public long localCheckpoint() { return indexShard.getLocalCheckpoint(); @@ -1025,40 +1031,47 @@ public ReplicationGroup getReplicationGroup() { public static class ReplicaResponse extends ActionResponse implements ReplicationOperation.ReplicaResponse { private long localCheckpoint; + private long globalCheckpoint; 
ReplicaResponse() { } - public ReplicaResponse(long localCheckpoint) { + public ReplicaResponse(long localCheckpoint, long globalCheckpoint) { /* - * A replica should always know its own local checkpoint so this should always be a valid sequence number or the pre-6.0 local + * A replica should always know its own local checkpoints so this should always be a valid sequence number or the pre-6.0 * checkpoint value when simulating responses to replication actions that pre-6.0 nodes are not aware of (e.g., the global * checkpoint background sync, and the primary/replica resync). */ assert localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO; this.localCheckpoint = localCheckpoint; + this.globalCheckpoint = globalCheckpoint; } @Override public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { - super.readFrom(in); localCheckpoint = in.readZLong(); } else { // 5.x used to read empty responses, which don't really read anything off the stream, so just do nothing. 
- localCheckpoint = SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT; + localCheckpoint = SequenceNumbersService.PRE_60_NODE_CHECKPOINT; + } + if (in.getVersion().onOrAfter(Version.V_6_0_0_rc1)) { + globalCheckpoint = in.readZLong(); + } else { + globalCheckpoint = SequenceNumbersService.PRE_60_NODE_CHECKPOINT; } } @Override public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { - super.writeTo(out); out.writeZLong(localCheckpoint); - } else { - // we use to write empty responses - Empty.INSTANCE.writeTo(out); + } + if (out.getVersion().onOrAfter(Version.V_6_0_0_rc1)) { + out.writeZLong(globalCheckpoint); } } @@ -1066,6 +1079,12 @@ public void writeTo(StreamOutput out) throws IOException { public long localCheckpoint() { return localCheckpoint; } + + @Override + public long globalCheckpoint() { + return globalCheckpoint; + } + } /** diff --git a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java index c7059d05919b1..e89d231ae6eb0 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java @@ -89,7 +89,8 @@ protected void sendReplicaRequest( if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { super.sendReplicaRequest(replicaRequest, node, listener); } else { - listener.onResponse(new ReplicaResponse(SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT)); + final long pre60NodeCheckpoint = SequenceNumbersService.PRE_60_NODE_CHECKPOINT; + listener.onResponse(new ReplicaResponse(pre60NodeCheckpoint, pre60NodeCheckpoint)); } } diff --git a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java index 4d9c493540280..416d6abdcb4cd 100644 --- 
a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.seqno; +import com.carrotsearch.hppc.ObjectLongHashMap; +import com.carrotsearch.hppc.ObjectLongMap; import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; @@ -36,8 +38,13 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Map; +import java.util.OptionalLong; import java.util.Set; +import java.util.function.Function; +import java.util.function.LongConsumer; +import java.util.function.ToLongFunction; import java.util.stream.Collectors; +import java.util.stream.LongStream; /** * This class is responsible of tracking the global checkpoint. The global checkpoint is the highest sequence number for which all lower (or @@ -50,7 +57,10 @@ */ public class GlobalCheckpointTracker extends AbstractIndexShardComponent { - private final String allocationId; + /** + * The allocation ID for the shard to which this tracker is a component of. + */ + final String shardAllocationId; /** * The global checkpoint tracker can operate in two modes: @@ -103,9 +113,9 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent { /** * Local checkpoint information for all shard copies that are tracked. Has an entry for all shard copies that are either initializing * and / or in-sync, possibly also containing information about unassigned in-sync shard copies. The information that is tracked for - * each shard copy is explained in the docs for the {@link LocalCheckpointState} class. + * each shard copy is explained in the docs for the {@link CheckpointState} class. 
*/ - final Map localCheckpoints; + final Map checkpoints; /** * This set contains allocation IDs for which there is a thread actively waiting for the local checkpoint to advance to at least the @@ -113,60 +123,67 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent { */ final Set pendingInSync; - /** - * The global checkpoint: - * - computed based on local checkpoints, if the tracker is in primary mode - * - received from the primary, if the tracker is in replica mode - */ - volatile long globalCheckpoint; - /** * Cached value for the last replication group that was computed */ volatile ReplicationGroup replicationGroup; - public static class LocalCheckpointState implements Writeable { + public static class CheckpointState implements Writeable { /** * the last local checkpoint information that we have for this shard */ long localCheckpoint; + + /** + * the last global checkpoint information that we have for this shard. This information is computed for the primary if + * the tracker is in primary mode and received from the primary if in replica mode. 
+ */ + long globalCheckpoint; /** * whether this shard is treated as in-sync and thus contributes to the global checkpoint calculation */ boolean inSync; - public LocalCheckpointState(long localCheckpoint, boolean inSync) { + public CheckpointState(long localCheckpoint, long globalCheckpoint, boolean inSync) { this.localCheckpoint = localCheckpoint; + this.globalCheckpoint = globalCheckpoint; this.inSync = inSync; } - public LocalCheckpointState(StreamInput in) throws IOException { + public CheckpointState(StreamInput in) throws IOException { this.localCheckpoint = in.readZLong(); + this.globalCheckpoint = in.readZLong(); this.inSync = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeZLong(localCheckpoint); + out.writeZLong(globalCheckpoint); out.writeBoolean(inSync); } /** * Returns a full copy of this object */ - public LocalCheckpointState copy() { - return new LocalCheckpointState(localCheckpoint, inSync); + public CheckpointState copy() { + return new CheckpointState(localCheckpoint, globalCheckpoint, inSync); } public long getLocalCheckpoint() { return localCheckpoint; } + public long getGlobalCheckpoint() { + return globalCheckpoint; + } + @Override public String toString() { return "LocalCheckpointState{" + "localCheckpoint=" + localCheckpoint + + ", globalCheckpoint=" + globalCheckpoint + ", inSync=" + inSync + '}'; } @@ -176,40 +193,71 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - LocalCheckpointState that = (LocalCheckpointState) o; + CheckpointState that = (CheckpointState) o; if (localCheckpoint != that.localCheckpoint) return false; + if (globalCheckpoint != that.globalCheckpoint) return false; return inSync == that.inSync; } @Override public int hashCode() { - int result = (int) (localCheckpoint ^ (localCheckpoint >>> 32)); - result = 31 * result + (inSync ? 
1 : 0); + int result = Long.hashCode(localCheckpoint); + result = 31 * result + Long.hashCode(globalCheckpoint); + result = 31 * result + Boolean.hashCode(inSync); return result; } } + synchronized ObjectLongMap getGlobalCheckpoints() { + assert primaryMode; + assert handoffInProgress == false; + final ObjectLongMap globalCheckpoints = new ObjectLongHashMap<>(checkpoints.size()); + for (final Map.Entry cps : checkpoints.entrySet()) { + globalCheckpoints.put(cps.getKey(), cps.getValue().globalCheckpoint); + } + return globalCheckpoints; + } + /** * Class invariant that should hold before and after every invocation of public methods on this class. As Java lacks implication * as a logical operator, many of the invariants are written under the form (!A || B), they should be read as (A implies B) however. */ private boolean invariant() { + assert checkpoints.get(shardAllocationId) != null : + "checkpoints map should always have an entry for the current shard"; + // local checkpoints only set during primary mode - assert primaryMode || localCheckpoints.values().stream() + assert primaryMode || checkpoints.values().stream() .allMatch(lcps -> lcps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO || - lcps.localCheckpoint == SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT); + lcps.localCheckpoint == SequenceNumbersService.PRE_60_NODE_CHECKPOINT); + + // global checkpoints for other shards only set during primary mode + assert primaryMode + || checkpoints + .entrySet() + .stream() + .filter(e -> e.getKey().equals(shardAllocationId) == false) + .map(Map.Entry::getValue) + .allMatch(cps -> + (cps.globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO + || cps.globalCheckpoint == SequenceNumbersService.PRE_60_NODE_CHECKPOINT)); // relocation handoff can only occur in primary mode assert !handoffInProgress || primaryMode; - // there is at least one in-sync shard copy when the global checkpoint tracker operates in primary mode (i.e. 
the shard itself) - assert !primaryMode || localCheckpoints.values().stream().anyMatch(lcps -> lcps.inSync); + // the current shard is marked as in-sync when the global checkpoint tracker operates in primary mode + assert !primaryMode || checkpoints.get(shardAllocationId).inSync; // the routing table and replication group is set when the global checkpoint tracker operates in primary mode assert !primaryMode || (routingTable != null && replicationGroup != null) : "primary mode but routing table is " + routingTable + " and replication group is " + replicationGroup; + // when in primary mode, the current allocation ID is the allocation ID of the primary or the relocation allocation ID + assert !primaryMode + || (routingTable.primaryShard().allocationId().getId().equals(shardAllocationId) + || routingTable.primaryShard().allocationId().getRelocationId().equals(shardAllocationId)); + // during relocation handoff there are no entries blocking global checkpoint advancement assert !handoffInProgress || pendingInSync.isEmpty() : "entries blocking global checkpoint advancement during relocation handoff: " + pendingInSync; @@ -218,9 +266,24 @@ private boolean invariant() { assert pendingInSync.isEmpty() || (primaryMode && !handoffInProgress); // the computed global checkpoint is always up-to-date - assert !primaryMode || globalCheckpoint == computeGlobalCheckpoint(pendingInSync, localCheckpoints.values(), globalCheckpoint) : - "global checkpoint is not up-to-date, expected: " + - computeGlobalCheckpoint(pendingInSync, localCheckpoints.values(), globalCheckpoint) + " but was: " + globalCheckpoint; + assert !primaryMode + || getGlobalCheckpoint() == computeGlobalCheckpoint(pendingInSync, checkpoints.values(), getGlobalCheckpoint()) + : "global checkpoint is not up-to-date, expected: " + + computeGlobalCheckpoint(pendingInSync, checkpoints.values(), getGlobalCheckpoint()) + " but was: " + getGlobalCheckpoint(); + + // when in primary mode, the global checkpoint is at most the 
minimum local checkpoint on all in-sync shard copies + assert !primaryMode + || getGlobalCheckpoint() <= inSyncCheckpointStates(checkpoints, CheckpointState::getLocalCheckpoint, LongStream::min) + : "global checkpoint [" + getGlobalCheckpoint() + "] " + + "for primary mode allocation ID [" + shardAllocationId + "] " + + "more than in-sync local checkpoints [" + checkpoints + "]"; + + // when in primary mode, the local knowledge of the global checkpoints on shard copies is bounded by the global checkpoint + assert !primaryMode + || getGlobalCheckpoint() >= inSyncCheckpointStates(checkpoints, CheckpointState::getGlobalCheckpoint, LongStream::max) + : "global checkpoint [" + getGlobalCheckpoint() + "] " + + "for primary mode allocation ID [" + shardAllocationId + "] " + + "less than in-sync global checkpoints [" + checkpoints + "]"; // we have a routing table iff we have a replication group assert (routingTable == null) == (replicationGroup == null) : @@ -230,10 +293,10 @@ private boolean invariant() { "cached replication group out of sync: expected: " + calculateReplicationGroup() + " but was: " + replicationGroup; // all assigned shards from the routing table are tracked - assert routingTable == null || localCheckpoints.keySet().containsAll(routingTable.getAllAllocationIds()) : - "local checkpoints " + localCheckpoints + " not in-sync with routing table " + routingTable; + assert routingTable == null || checkpoints.keySet().containsAll(routingTable.getAllAllocationIds()) : + "local checkpoints " + checkpoints + " not in-sync with routing table " + routingTable; - for (Map.Entry entry : localCheckpoints.entrySet()) { + for (Map.Entry entry : checkpoints.entrySet()) { // blocking global checkpoint advancement only happens for shards that are not in-sync assert !pendingInSync.contains(entry.getKey()) || !entry.getValue().inSync : "shard copy " + entry.getKey() + " blocks global checkpoint advancement but is in-sync"; @@ -242,6 +305,21 @@ private boolean invariant() { 
return true; } + private static long inSyncCheckpointStates( + final Map checkpoints, + ToLongFunction function, + Function reducer) { + final OptionalLong value = + reducer.apply( + checkpoints + .values() + .stream() + .filter(cps -> cps.inSync) + .mapToLong(function) + .filter(v -> v != SequenceNumbers.UNASSIGNED_SEQ_NO)); + return value.isPresent() ? value.getAsLong() : SequenceNumbers.UNASSIGNED_SEQ_NO; + } + /** * Initialize the global checkpoint service. The specified global checkpoint should be set to the last known global checkpoint, or * {@link SequenceNumbers#UNASSIGNED_SEQ_NO}. @@ -258,12 +336,12 @@ private boolean invariant() { final long globalCheckpoint) { super(shardId, indexSettings); assert globalCheckpoint >= SequenceNumbers.UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint; - this.allocationId = allocationId; + this.shardAllocationId = allocationId; this.primaryMode = false; this.handoffInProgress = false; this.appliedClusterStateVersion = -1L; - this.globalCheckpoint = globalCheckpoint; - this.localCheckpoints = new HashMap<>(1 + indexSettings.getNumberOfReplicas()); + this.checkpoints = new HashMap<>(1 + indexSettings.getNumberOfReplicas()); + checkpoints.put(allocationId, new CheckpointState(SequenceNumbers.UNASSIGNED_SEQ_NO, globalCheckpoint, false)); this.pendingInSync = new HashSet<>(); this.routingTable = null; this.replicationGroup = null; @@ -282,7 +360,7 @@ public ReplicationGroup getReplicationGroup() { private ReplicationGroup calculateReplicationGroup() { return new ReplicationGroup(routingTable, - localCheckpoints.entrySet().stream().filter(e -> e.getValue().inSync).map(Map.Entry::getKey).collect(Collectors.toSet())); + checkpoints.entrySet().stream().filter(e -> e.getValue().inSync).map(Map.Entry::getKey).collect(Collectors.toSet())); } /** @@ -290,8 +368,10 @@ private ReplicationGroup calculateReplicationGroup() { * * @return the global checkpoint */ - public long getGlobalCheckpoint() { - return 
globalCheckpoint; + public synchronized long getGlobalCheckpoint() { + final CheckpointState cps = checkpoints.get(shardAllocationId); + assert cps != null; + return cps.globalCheckpoint; } /** @@ -306,27 +386,58 @@ public synchronized void updateGlobalCheckpointOnReplica(final long globalCheckp /* * The global checkpoint here is a local knowledge which is updated under the mandate of the primary. It can happen that the primary * information is lagging compared to a replica (e.g., if a replica is promoted to primary but has stale info relative to other - * replica shards). In these cases, the local knowledge of the global checkpoint could be higher than sync from the lagging primary. + * replica shards). In these cases, the local knowledge of the global checkpoint could be higher than the sync from the lagging + * primary. */ - if (this.globalCheckpoint <= globalCheckpoint) { - logger.trace("updating global checkpoint from [{}] to [{}] due to [{}]", this.globalCheckpoint, globalCheckpoint, reason); - this.globalCheckpoint = globalCheckpoint; - } + updateGlobalCheckpoint( + shardAllocationId, + globalCheckpoint, + current -> logger.trace("updating global checkpoint from [{}] to [{}] due to [{}]", current, globalCheckpoint, reason)); + assert invariant(); + } + + /** + * Update the local knowledge of the global checkpoint for the specified allocation ID. 
+ * + * @param allocationId the allocation ID to update the global checkpoint for + * @param globalCheckpoint the global checkpoint + */ + public synchronized void updateGlobalCheckpointForShard(final String allocationId, final long globalCheckpoint) { + assert primaryMode; + assert handoffInProgress == false; + assert invariant(); + updateGlobalCheckpoint( + allocationId, + globalCheckpoint, + current -> logger.trace( + "updating local knowledge for [{}] on the primary of the global checkpoint from [{}] to [{}]", + allocationId, + current, + globalCheckpoint)); assert invariant(); } + private void updateGlobalCheckpoint(final String allocationId, final long globalCheckpoint, LongConsumer ifUpdated) { + final CheckpointState cps = checkpoints.get(allocationId); + assert !this.shardAllocationId.equals(allocationId) || cps != null; + if (cps != null && globalCheckpoint > cps.globalCheckpoint) { + ifUpdated.accept(cps.globalCheckpoint); + cps.globalCheckpoint = globalCheckpoint; + } + } + /** * Initializes the global checkpoint tracker in primary mode (see {@link #primaryMode}. Called on primary activation or promotion. 
*/ public synchronized void activatePrimaryMode(final long localCheckpoint) { assert invariant(); assert primaryMode == false; - assert localCheckpoints.get(allocationId) != null && localCheckpoints.get(allocationId).inSync && - localCheckpoints.get(allocationId).localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO : - "expected " + allocationId + " to have initialized entry in " + localCheckpoints + " when activating primary"; + assert checkpoints.get(shardAllocationId) != null && checkpoints.get(shardAllocationId).inSync && + checkpoints.get(shardAllocationId).localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO : + "expected " + shardAllocationId + " to have initialized entry in " + checkpoints + " when activating primary"; assert localCheckpoint >= SequenceNumbers.NO_OPS_PERFORMED; primaryMode = true; - updateLocalCheckpoint(allocationId, localCheckpoints.get(allocationId), localCheckpoint); + updateLocalCheckpoint(shardAllocationId, checkpoints.get(shardAllocationId), localCheckpoint); updateGlobalCheckpointOnPrimary(); assert invariant(); } @@ -345,37 +456,47 @@ public synchronized void updateFromMaster(final long applyingClusterStateVersion if (applyingClusterStateVersion > appliedClusterStateVersion) { // check that the master does not fabricate new in-sync entries out of thin air once we are in primary mode assert !primaryMode || inSyncAllocationIds.stream().allMatch( - inSyncId -> localCheckpoints.containsKey(inSyncId) && localCheckpoints.get(inSyncId).inSync) : + inSyncId -> checkpoints.containsKey(inSyncId) && checkpoints.get(inSyncId).inSync) : "update from master in primary mode contains in-sync ids " + inSyncAllocationIds + - " that have no matching entries in " + localCheckpoints; + " that have no matching entries in " + checkpoints; // remove entries which don't exist on master Set initializingAllocationIds = routingTable.getAllInitializingShards().stream() .map(ShardRouting::allocationId).map(AllocationId::getId).collect(Collectors.toSet()); - 
boolean removedEntries = localCheckpoints.keySet().removeIf( + boolean removedEntries = checkpoints.keySet().removeIf( aid -> !inSyncAllocationIds.contains(aid) && !initializingAllocationIds.contains(aid)); if (primaryMode) { // add new initializingIds that are missing locally. These are fresh shard copies - and not in-sync for (String initializingId : initializingAllocationIds) { - if (localCheckpoints.containsKey(initializingId) == false) { + if (checkpoints.containsKey(initializingId) == false) { final boolean inSync = inSyncAllocationIds.contains(initializingId); assert inSync == false : "update from master in primary mode has " + initializingId + " as in-sync but it does not exist locally"; final long localCheckpoint = pre60AllocationIds.contains(initializingId) ? - SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; - localCheckpoints.put(initializingId, new LocalCheckpointState(localCheckpoint, inSync)); + SequenceNumbersService.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; + final long globalCheckpoint = localCheckpoint; + checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, inSync)); } } } else { for (String initializingId : initializingAllocationIds) { - final long localCheckpoint = pre60AllocationIds.contains(initializingId) ? - SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; - localCheckpoints.put(initializingId, new LocalCheckpointState(localCheckpoint, false)); + if (shardAllocationId.equals(initializingId) == false) { + final long localCheckpoint = pre60AllocationIds.contains(initializingId) ? 
+ SequenceNumbersService.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; + final long globalCheckpoint = localCheckpoint; + checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, false)); + } } for (String inSyncId : inSyncAllocationIds) { - final long localCheckpoint = pre60AllocationIds.contains(inSyncId) ? - SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; - localCheckpoints.put(inSyncId, new LocalCheckpointState(localCheckpoint, true)); + if (shardAllocationId.equals(inSyncId)) { + // current shard is initially marked as not in-sync because we don't know better at that point + checkpoints.get(shardAllocationId).inSync = true; + } else { + final long localCheckpoint = pre60AllocationIds.contains(inSyncId) ? + SequenceNumbersService.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; + final long globalCheckpoint = localCheckpoint; + checkpoints.put(inSyncId, new CheckpointState(localCheckpoint, globalCheckpoint, true)); + } } } appliedClusterStateVersion = applyingClusterStateVersion; @@ -397,8 +518,8 @@ public synchronized void updateFromMaster(final long applyingClusterStateVersion public synchronized void initiateTracking(final String allocationId) { assert invariant(); assert primaryMode; - LocalCheckpointState lcps = localCheckpoints.get(allocationId); - if (lcps == null) { + CheckpointState cps = checkpoints.get(allocationId); + if (cps == null) { // can happen if replica was removed from cluster but recovery process is unaware of it yet throw new IllegalStateException("no local checkpoint tracking information available"); } @@ -416,21 +537,21 @@ public synchronized void markAllocationIdAsInSync(final String allocationId, fin assert invariant(); assert primaryMode; assert handoffInProgress == false; - LocalCheckpointState lcps = localCheckpoints.get(allocationId); - if (lcps == null) { + CheckpointState cps = checkpoints.get(allocationId); + if (cps == null) { // 
can happen if replica was removed from cluster but recovery process is unaware of it yet throw new IllegalStateException("no local checkpoint tracking information available for " + allocationId); } assert localCheckpoint >= SequenceNumbers.NO_OPS_PERFORMED : "expected known local checkpoint for " + allocationId + " but was " + localCheckpoint; assert pendingInSync.contains(allocationId) == false : "shard copy " + allocationId + " is already marked as pending in-sync"; - updateLocalCheckpoint(allocationId, lcps, localCheckpoint); + updateLocalCheckpoint(allocationId, cps, localCheckpoint); // if it was already in-sync (because of a previously failed recovery attempt), global checkpoint must have been // stuck from advancing - assert !lcps.inSync || (lcps.localCheckpoint >= globalCheckpoint) : - "shard copy " + allocationId + " that's already in-sync should have a local checkpoint " + lcps.localCheckpoint + - " that's above the global checkpoint " + globalCheckpoint; - if (lcps.localCheckpoint < globalCheckpoint) { + assert !cps.inSync || (cps.localCheckpoint >= getGlobalCheckpoint()) : + "shard copy " + allocationId + " that's already in-sync should have a local checkpoint " + cps.localCheckpoint + + " that's above the global checkpoint " + getGlobalCheckpoint(); + if (cps.localCheckpoint < getGlobalCheckpoint()) { pendingInSync.add(allocationId); try { while (true) { @@ -444,7 +565,7 @@ public synchronized void markAllocationIdAsInSync(final String allocationId, fin pendingInSync.remove(allocationId); } } else { - lcps.inSync = true; + cps.inSync = true; replicationGroup = calculateReplicationGroup(); logger.trace("marked [{}] as in-sync", allocationId); updateGlobalCheckpointOnPrimary(); @@ -453,21 +574,21 @@ public synchronized void markAllocationIdAsInSync(final String allocationId, fin assert invariant(); } - private boolean updateLocalCheckpoint(String allocationId, LocalCheckpointState lcps, long localCheckpoint) { - // a local checkpoint of 
PRE_60_NODE_LOCAL_CHECKPOINT cannot be overridden - assert lcps.localCheckpoint != SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT || - localCheckpoint == SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT : + private boolean updateLocalCheckpoint(String allocationId, CheckpointState cps, long localCheckpoint) { + // a local checkpoint of PRE_60_NODE_CHECKPOINT cannot be overridden + assert cps.localCheckpoint != SequenceNumbersService.PRE_60_NODE_CHECKPOINT || + localCheckpoint == SequenceNumbersService.PRE_60_NODE_CHECKPOINT : "pre-6.0 shard copy " + allocationId + " unexpected to send valid local checkpoint " + localCheckpoint; // a local checkpoint for a shard copy should be a valid sequence number or the pre-6.0 sequence number indicator assert localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO : "invalid local checkpoint for shard copy [" + allocationId + "]"; - if (localCheckpoint > lcps.localCheckpoint) { - logger.trace("updated local checkpoint of [{}] from [{}] to [{}]", allocationId, lcps.localCheckpoint, localCheckpoint); - lcps.localCheckpoint = localCheckpoint; + if (localCheckpoint > cps.localCheckpoint) { + logger.trace("updated local checkpoint of [{}] from [{}] to [{}]", allocationId, cps.localCheckpoint, localCheckpoint); + cps.localCheckpoint = localCheckpoint; return true; } else { logger.trace("skipped updating local checkpoint of [{}] from [{}] to [{}], current checkpoint is higher", allocationId, - lcps.localCheckpoint, localCheckpoint); + cps.localCheckpoint, localCheckpoint); return false; } } @@ -483,17 +604,17 @@ public synchronized void updateLocalCheckpoint(final String allocationId, final assert invariant(); assert primaryMode; assert handoffInProgress == false; - LocalCheckpointState lcps = localCheckpoints.get(allocationId); - if (lcps == null) { + CheckpointState cps = checkpoints.get(allocationId); + if (cps == null) { // can happen if replica was removed from cluster but replication process is unaware of it yet return; } - 
boolean increasedLocalCheckpoint = updateLocalCheckpoint(allocationId, lcps, localCheckpoint); + boolean increasedLocalCheckpoint = updateLocalCheckpoint(allocationId, cps, localCheckpoint); boolean pending = pendingInSync.contains(allocationId); - if (pending && lcps.localCheckpoint >= globalCheckpoint) { + if (pending && cps.localCheckpoint >= getGlobalCheckpoint()) { pendingInSync.remove(allocationId); pending = false; - lcps.inSync = true; + cps.inSync = true; replicationGroup = calculateReplicationGroup(); logger.trace("marked [{}] as in-sync", allocationId); notifyAllWaiters(); @@ -508,21 +629,21 @@ public synchronized void updateLocalCheckpoint(final String allocationId, final * Computes the global checkpoint based on the given local checkpoints. In case where there are entries preventing the * computation to happen (for example due to blocking), it returns the fallback value. */ - private static long computeGlobalCheckpoint(final Set pendingInSync, final Collection localCheckpoints, + private static long computeGlobalCheckpoint(final Set pendingInSync, final Collection localCheckpoints, final long fallback) { long minLocalCheckpoint = Long.MAX_VALUE; if (pendingInSync.isEmpty() == false) { return fallback; } - for (final LocalCheckpointState lcps : localCheckpoints) { - if (lcps.inSync) { - if (lcps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { + for (final CheckpointState cps : localCheckpoints) { + if (cps.inSync) { + if (cps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { // unassigned in-sync replica return fallback; - } else if (lcps.localCheckpoint == SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT) { + } else if (cps.localCheckpoint == SequenceNumbersService.PRE_60_NODE_CHECKPOINT) { // 5.x replica, ignore for global checkpoint calculation } else { - minLocalCheckpoint = Math.min(lcps.localCheckpoint, minLocalCheckpoint); + minLocalCheckpoint = Math.min(cps.localCheckpoint, minLocalCheckpoint); } } } @@ -535,12 +656,14 @@ 
private static long computeGlobalCheckpoint(final Set pendingInSync, fin */ private synchronized void updateGlobalCheckpointOnPrimary() { assert primaryMode; - final long computedGlobalCheckpoint = computeGlobalCheckpoint(pendingInSync, localCheckpoints.values(), globalCheckpoint); + final CheckpointState cps = checkpoints.get(shardAllocationId); + final long globalCheckpoint = cps.globalCheckpoint; + final long computedGlobalCheckpoint = computeGlobalCheckpoint(pendingInSync, checkpoints.values(), getGlobalCheckpoint()); assert computedGlobalCheckpoint >= globalCheckpoint : "new global checkpoint [" + computedGlobalCheckpoint + "] is lower than previous one [" + globalCheckpoint + "]"; if (globalCheckpoint != computedGlobalCheckpoint) { logger.trace("global checkpoint updated to [{}]", computedGlobalCheckpoint); - globalCheckpoint = computedGlobalCheckpoint; + cps.globalCheckpoint = computedGlobalCheckpoint; } } @@ -553,13 +676,13 @@ public synchronized PrimaryContext startRelocationHandoff() { assert handoffInProgress == false; assert pendingInSync.isEmpty() : "relocation handoff started while there are still shard copies pending in-sync: " + pendingInSync; handoffInProgress = true; - // copy clusterStateVersion and localCheckpoints and return - // all the entries from localCheckpoints that are inSync: the reason we don't need to care about initializing non-insync entries + // copy clusterStateVersion and checkpoints and return + // all the entries from checkpoints that are inSync: the reason we don't need to care about initializing non-insync entries // is that they will have to undergo a recovery attempt on the relocation target, and will hence be supplied by the cluster state // update on the relocation target once relocation completes). We could alternatively also copy the map as-is (it’s safe), and it // would be cleaned up on the target by cluster state updates. 
- Map localCheckpointsCopy = new HashMap<>(); - for (Map.Entry entry : localCheckpoints.entrySet()) { + Map localCheckpointsCopy = new HashMap<>(); + for (Map.Entry entry : checkpoints.entrySet()) { localCheckpointsCopy.put(entry.getKey(), entry.getValue().copy()); } assert invariant(); @@ -586,11 +709,19 @@ public synchronized void completeRelocationHandoff() { assert handoffInProgress; primaryMode = false; handoffInProgress = false; - // forget all checkpoint information - localCheckpoints.values().stream().forEach(lcps -> { - if (lcps.localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && - lcps.localCheckpoint != SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT) { - lcps.localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + // forget all checkpoint information except for global checkpoint of current shard + checkpoints.entrySet().stream().forEach(e -> { + final CheckpointState cps = e.getValue(); + if (cps.localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && + cps.localCheckpoint != SequenceNumbersService.PRE_60_NODE_CHECKPOINT) { + cps.localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + } + if (e.getKey().equals(shardAllocationId) == false) { + // don't throw global checkpoint information of current shard away + if (cps.globalCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && + cps.globalCheckpoint != SequenceNumbersService.PRE_60_NODE_CHECKPOINT) { + cps.globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + } } }); assert invariant(); @@ -609,9 +740,9 @@ public synchronized void activateWithPrimaryContext(PrimaryContext primaryContex primaryMode = true; // capture current state to possibly replay missed cluster state update appliedClusterStateVersion = primaryContext.clusterStateVersion(); - localCheckpoints.clear(); - for (Map.Entry entry : primaryContext.localCheckpoints.entrySet()) { - localCheckpoints.put(entry.getKey(), entry.getValue().copy()); + checkpoints.clear(); + for (Map.Entry entry : primaryContext.checkpoints.entrySet()) { + 
checkpoints.put(entry.getKey(), entry.getValue().copy()); } routingTable = primaryContext.getRoutingTable(); replicationGroup = calculateReplicationGroup(); @@ -628,11 +759,11 @@ private Runnable getMasterUpdateOperationFromCurrentState() { final long lastAppliedClusterStateVersion = appliedClusterStateVersion; final Set inSyncAllocationIds = new HashSet<>(); final Set pre60AllocationIds = new HashSet<>(); - localCheckpoints.entrySet().forEach(entry -> { + checkpoints.entrySet().forEach(entry -> { if (entry.getValue().inSync) { inSyncAllocationIds.add(entry.getKey()); } - if (entry.getValue().getLocalCheckpoint() == SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT) { + if (entry.getValue().getLocalCheckpoint() == SequenceNumbersService.PRE_60_NODE_CHECKPOINT) { pre60AllocationIds.add(entry.getKey()); } }); @@ -651,9 +782,9 @@ public synchronized boolean pendingInSync() { /** * Returns the local checkpoint information tracked for a specific shard. Used by tests. */ - public synchronized LocalCheckpointState getTrackedLocalCheckpointForShard(String allocationId) { + public synchronized CheckpointState getTrackedLocalCheckpointForShard(String allocationId) { assert primaryMode; - return localCheckpoints.get(allocationId); + return checkpoints.get(allocationId); } /** @@ -682,19 +813,19 @@ private synchronized void waitForLocalCheckpointToAdvance() throws InterruptedEx public static class PrimaryContext implements Writeable { private final long clusterStateVersion; - private final Map localCheckpoints; + private final Map checkpoints; private final IndexShardRoutingTable routingTable; - public PrimaryContext(long clusterStateVersion, Map localCheckpoints, + public PrimaryContext(long clusterStateVersion, Map checkpoints, IndexShardRoutingTable routingTable) { this.clusterStateVersion = clusterStateVersion; - this.localCheckpoints = localCheckpoints; + this.checkpoints = checkpoints; this.routingTable = routingTable; } public PrimaryContext(StreamInput in) throws 
IOException { clusterStateVersion = in.readVLong(); - localCheckpoints = in.readMap(StreamInput::readString, LocalCheckpointState::new); + checkpoints = in.readMap(StreamInput::readString, CheckpointState::new); routingTable = IndexShardRoutingTable.Builder.readFrom(in); } @@ -702,8 +833,8 @@ public long clusterStateVersion() { return clusterStateVersion; } - public Map getLocalCheckpoints() { - return localCheckpoints; + public Map getCheckpointStates() { + return checkpoints; } public IndexShardRoutingTable getRoutingTable() { @@ -713,7 +844,7 @@ public IndexShardRoutingTable getRoutingTable() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(clusterStateVersion); - out.writeMap(localCheckpoints, (streamOutput, s) -> out.writeString(s), (streamOutput, lcps) -> lcps.writeTo(out)); + out.writeMap(checkpoints, (streamOutput, s) -> out.writeString(s), (streamOutput, cps) -> cps.writeTo(out)); IndexShardRoutingTable.Builder.writeTo(routingTable, out); } @@ -721,7 +852,7 @@ public void writeTo(StreamOutput out) throws IOException { public String toString() { return "PrimaryContext{" + "clusterStateVersion=" + clusterStateVersion + - ", localCheckpoints=" + localCheckpoints + + ", checkpoints=" + checkpoints + ", routingTable=" + routingTable + '}'; } @@ -740,8 +871,8 @@ public boolean equals(Object o) { @Override public int hashCode() { - int result = (int) (clusterStateVersion ^ (clusterStateVersion >>> 32)); - result = 31 * result + localCheckpoints.hashCode(); + int result = Long.hashCode(clusterStateVersion); + result = 31 * result + checkpoints.hashCode(); result = 31 * result + routingTable.hashCode(); return result; } diff --git a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java index 2c4286e6e5798..fa0d0bc9b34b5 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java +++ 
b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.seqno; +import com.carrotsearch.hppc.ObjectLongMap; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; @@ -35,7 +36,7 @@ public class SequenceNumbersService extends AbstractIndexShardComponent { /** * Represents a local checkpoint coming from a pre-6.0 node */ - public static final long PRE_60_NODE_LOCAL_CHECKPOINT = -3L; + public static final long PRE_60_NODE_CHECKPOINT = -3L; private final LocalCheckpointTracker localCheckpointTracker; private final GlobalCheckpointTracker globalCheckpointTracker; @@ -132,6 +133,20 @@ public void updateLocalCheckpointForShard(final String allocationId, final long globalCheckpointTracker.updateLocalCheckpoint(allocationId, checkpoint); } + /** + * Update the local knowledge of the global checkpoint for the specified allocation ID. + * + * @param allocationId the allocation ID to update the global checkpoint for + * @param globalCheckpoint the global checkpoint + */ + public void updateGlobalCheckpointForShard(final String allocationId, final long globalCheckpoint) { + globalCheckpointTracker.updateGlobalCheckpointForShard(allocationId, globalCheckpoint); + } + + public ObjectLongMap getGlobalCheckpoints() { + return globalCheckpointTracker.getGlobalCheckpoints(); + } + /** * Called when the recovery process for a shard is ready to open the engine on the target shard. * See {@link GlobalCheckpointTracker#initiateTracking(String)} for details. @@ -201,7 +216,7 @@ public synchronized long getTrackedLocalCheckpointForShard(final String allocati * Activates the global checkpoint tracker in primary mode (see {@link GlobalCheckpointTracker#primaryMode}. * Called on primary activation or promotion. 
*/ - public void activatePrimaryMode(final String allocationId, final long localCheckpoint) { + public void activatePrimaryMode(final long localCheckpoint) { globalCheckpointTracker.activatePrimaryMode(localCheckpoint); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index dd47be5a141dd..24ad4cdb1b85c 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.shard; +import com.carrotsearch.hppc.ObjectLongMap; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexCommit; @@ -401,7 +402,7 @@ public void updateShardState(final ShardRouting newRouting, final DiscoveryNode recoverySourceNode = recoveryState.getSourceNode(); if (currentRouting.isRelocationTarget() == false || recoverySourceNode.getVersion().before(Version.V_6_0_0_alpha1)) { // there was no primary context hand-off in < 6.0.0, need to manually activate the shard - getEngine().seqNoService().activatePrimaryMode(currentRouting.allocationId().getId(), getEngine().seqNoService().getLocalCheckpoint()); + getEngine().seqNoService().activatePrimaryMode(getEngine().seqNoService().getLocalCheckpoint()); } } @@ -498,7 +499,7 @@ public void onFailure(Exception e) { } }, e -> failShard("exception during primary term transition", e)); - getEngine().seqNoService().activatePrimaryMode(currentRouting.allocationId().getId(), getEngine().seqNoService().getLocalCheckpoint()); + getEngine().seqNoService().activatePrimaryMode(getEngine().seqNoService().getLocalCheckpoint()); primaryTerm = newPrimaryTerm; latch.countDown(); } @@ -1673,6 +1674,18 @@ public void updateLocalCheckpointForShard(final String allocationId, final long getEngine().seqNoService().updateLocalCheckpointForShard(allocationId, checkpoint); } + /** + * Update 
the local knowledge of the global checkpoint for the specified allocation ID. + * + * @param allocationId the allocation ID to update the global checkpoint for + * @param globalCheckpoint the global checkpoint + */ + public void updateGlobalCheckpointForShard(final String allocationId, final long globalCheckpoint) { + verifyPrimary(); + verifyNotClosed(); + getEngine().seqNoService().updateGlobalCheckpointForShard(allocationId, globalCheckpoint); + } + /** * Waits for all operations up to the provided sequence number to complete. * @@ -1735,6 +1748,12 @@ public long getGlobalCheckpoint() { return getEngine().seqNoService().getGlobalCheckpoint(); } + public ObjectLongMap getGlobalCheckpoints() { + verifyPrimary(); + verifyNotClosed(); + return getEngine().seqNoService().getGlobalCheckpoints(); + } + /** * Returns the current replication group for the shard. * @@ -1783,9 +1802,9 @@ assert state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.ST public void activateWithPrimaryContext(final GlobalCheckpointTracker.PrimaryContext primaryContext) { verifyPrimary(); assert shardRouting.isRelocationTarget() : "only relocation target can update allocation IDs from primary context: " + shardRouting; - assert primaryContext.getLocalCheckpoints().containsKey(routingEntry().allocationId().getId()) && + assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) && getEngine().seqNoService().getLocalCheckpoint() == - primaryContext.getLocalCheckpoints().get(routingEntry().allocationId().getId()).getLocalCheckpoint(); + primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()).getLocalCheckpoint(); getEngine().seqNoService().activateWithPrimaryContext(primaryContext); } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 70e1ba06b07e3..01d971ba32599 100644 --- 
a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -475,7 +475,9 @@ public void finalizeRecovery(final long targetLocalCheckpoint) { * the permit then the state of the shard will be relocated and this recovery will fail. */ runUnderPrimaryPermit(() -> shard.markAllocationIdAsInSync(request.targetAllocationId(), targetLocalCheckpoint)); - cancellableThreads.execute(() -> recoveryTarget.finalizeRecovery(shard.getGlobalCheckpoint())); + final long globalCheckpoint = shard.getGlobalCheckpoint(); + cancellableThreads.execute(() -> recoveryTarget.finalizeRecovery(globalCheckpoint)); + shard.updateGlobalCheckpointForShard(request.targetAllocationId(), globalCheckpoint); if (request.isPrimaryRelocation()) { logger.trace("performing relocation hand-off"); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index e590e12895591..07dd1ae9ed1ac 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -131,6 +131,7 @@ public void testReplication() throws Exception { assertThat(primary.knownLocalCheckpoints.remove(primaryShard.allocationId().getId()), equalTo(primary.localCheckpoint)); assertThat(primary.knownLocalCheckpoints, equalTo(replicasProxy.generatedLocalCheckpoints)); + assertThat(primary.knownGlobalCheckpoints, equalTo(replicasProxy.generatedGlobalCheckpoints)); } public void testDemotedPrimary() throws Exception { @@ -380,6 +381,7 @@ static class TestPrimary implements ReplicationOperation.Primary clusterStateSupplier; final Map knownLocalCheckpoints = new HashMap<>(); + final Map knownGlobalCheckpoints = new HashMap<>(); TestPrimary(ShardRouting 
routing, Supplier clusterStateSupplier) { this.routing = routing; @@ -434,6 +436,11 @@ public void updateLocalCheckpointForShard(String allocationId, long checkpoint) knownLocalCheckpoints.put(allocationId, checkpoint); } + @Override + public void updateGlobalCheckpointForShard(String allocationId, long globalCheckpoint) { + knownGlobalCheckpoints.put(allocationId, globalCheckpoint); + } + @Override public long localCheckpoint() { return localCheckpoint; @@ -455,15 +462,23 @@ public ReplicationGroup getReplicationGroup() { static class ReplicaResponse implements ReplicationOperation.ReplicaResponse { final long localCheckpoint; + final long globalCheckpoint; - ReplicaResponse(long localCheckpoint) { + ReplicaResponse(long localCheckpoint, long globalCheckpoint) { this.localCheckpoint = localCheckpoint; + this.globalCheckpoint = globalCheckpoint; } @Override public long localCheckpoint() { return localCheckpoint; } + + @Override + public long globalCheckpoint() { + return globalCheckpoint; + } + } static class TestReplicaProxy implements ReplicationOperation.Replicas { @@ -474,6 +489,8 @@ static class TestReplicaProxy implements ReplicationOperation.Replicas final Map generatedLocalCheckpoints = ConcurrentCollections.newConcurrentMap(); + final Map generatedGlobalCheckpoints = ConcurrentCollections.newConcurrentMap(); + final Set markedAsStaleCopies = ConcurrentCollections.newConcurrentSet(); final long primaryTerm; @@ -497,11 +514,12 @@ public void performOn( if (opFailures.containsKey(replica)) { listener.onFailure(opFailures.get(replica)); } else { - final long checkpoint = random().nextLong(); + final long generatedLocalCheckpoint = random().nextLong(); + final long generatedGlobalCheckpoint = random().nextLong(); final String allocationId = replica.allocationId().getId(); - Long existing = generatedLocalCheckpoints.put(allocationId, checkpoint); - assertNull(existing); - listener.onResponse(new ReplicaResponse(checkpoint)); + 
assertNull(generatedLocalCheckpoints.put(allocationId, generatedLocalCheckpoint)); + assertNull(generatedGlobalCheckpoints.put(allocationId, generatedGlobalCheckpoint)); + listener.onResponse(new ReplicaResponse(generatedLocalCheckpoint, generatedGlobalCheckpoint)); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 1c8aa7079997e..1127a5ced580d 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -639,7 +639,8 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { CapturingTransport.CapturedRequest[] captures = transport.getCapturedRequestsAndClear(); assertThat(captures, arrayWithSize(1)); if (randomBoolean()) { - final TransportReplicationAction.ReplicaResponse response = new TransportReplicationAction.ReplicaResponse(randomLong()); + final TransportReplicationAction.ReplicaResponse response = + new TransportReplicationAction.ReplicaResponse(randomLong(), randomLong()); transport.handleResponse(captures[0].requestId, response); assertTrue(listener.isDone()); assertThat(listener.get(), equalTo(response)); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index b1a1562073881..b3db10f920973 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -289,7 +289,8 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { CapturingTransport.CapturedRequest[] captures = 
transport.getCapturedRequestsAndClear(); assertThat(captures, arrayWithSize(1)); if (randomBoolean()) { - final TransportReplicationAction.ReplicaResponse response = new TransportReplicationAction.ReplicaResponse(randomLong()); + final TransportReplicationAction.ReplicaResponse response = + new TransportReplicationAction.ReplicaResponse(randomLong(), randomLong()); transport.handleResponse(captures[0].requestId, response); assertTrue(listener.isDone()); assertThat(listener.get(), equalTo(response)); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 0ea47392d5c21..8b78227ca3121 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -82,7 +82,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -2028,7 +2027,7 @@ public void testSeqNoAndCheckpoints() throws IOException { final Set indexedIds = new HashSet<>(); long localCheckpoint = SequenceNumbers.NO_OPS_PERFORMED; long replicaLocalCheckpoint = SequenceNumbers.NO_OPS_PERFORMED; - long globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + final long globalCheckpoint; long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; InternalEngine initialEngine = null; @@ -2039,7 +2038,7 @@ public void testSeqNoAndCheckpoints() throws IOException { initialEngine.seqNoService().updateAllocationIdsFromMaster(1L, new HashSet<>(Arrays.asList(primary.allocationId().getId(), replica.allocationId().getId())), new IndexShardRoutingTable.Builder(shardId).addShard(primary).addShard(replica).build(), Collections.emptySet()); - 
initialEngine.seqNoService().activatePrimaryMode(primary.allocationId().getId(), primarySeqNo); + initialEngine.seqNoService().activatePrimaryMode(primarySeqNo); for (int op = 0; op < opCount; op++) { final String id; // mostly index, sometimes delete diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 93ebb319063f1..3faa2da7b4ffe 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -481,6 +481,11 @@ public void updateLocalCheckpointForShard(String allocationId, long checkpoint) replicationGroup.getPrimary().updateLocalCheckpointForShard(allocationId, checkpoint); } + @Override + public void updateGlobalCheckpointForShard(String allocationId, long globalCheckpoint) { + replicationGroup.getPrimary().updateGlobalCheckpointForShard(allocationId, globalCheckpoint); + } + @Override public long localCheckpoint() { return replicationGroup.getPrimary().getLocalCheckpoint(); @@ -518,7 +523,7 @@ public void onResponse(Releasable releasable) { try { performOnReplica(request, replica); releasable.close(); - listener.onResponse(new ReplicaResponse(replica.getLocalCheckpoint())); + listener.onResponse(new ReplicaResponse(replica.getLocalCheckpoint(), replica.getGlobalCheckpoint())); } catch (final Exception e) { Releasables.closeWhileHandlingException(releasable); listener.onFailure(e); diff --git a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 562e40a790dda..96f6aa6d47acb 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -293,7 +293,6 @@ public void testResyncAfterPrimaryPromotion() throws Exception { final IndexShard oldPrimary = shards.getPrimary(); final IndexShard newPrimary = shards.getReplicas().get(0); - final IndexShard otherReplica = shards.getReplicas().get(1); // simulate docs that were inflight when primary failed final int extraDocs = randomIntBetween(0, 5); diff --git a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java index 4741921cdf917..dcaab38be5cfb 100644 --- a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java +++ b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -35,10 +34,10 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; -import org.junit.Before; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -75,13 +74,24 @@ private Map randomAllocationsWithLocalCheckpoints(int min, i return allocations; } - private static IndexShardRoutingTable routingTable(Set initializingIds) { + private static IndexShardRoutingTable routingTable(final Set initializingIds, final AllocationId primaryId) { + final ShardId shardId = new ShardId("test", "_na_", 0); + final ShardRouting primaryShard = + 
TestShardRouting.newShardRouting(shardId, randomAlphaOfLength(10), null, true, ShardRoutingState.STARTED, primaryId); + return routingTable(initializingIds, primaryShard); + } + + private static IndexShardRoutingTable routingTable(final Set initializingIds, final ShardRouting primaryShard) { + assert !initializingIds.contains(primaryShard.allocationId()); ShardId shardId = new ShardId("test", "_na_", 0); IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); for (AllocationId initializingId : initializingIds) { - builder.addShard(TestShardRouting.newShardRouting(shardId, randomAlphaOfLength(10), null, false, ShardRoutingState.INITIALIZING, - initializingId)); + builder.addShard(TestShardRouting.newShardRouting( + shardId, randomAlphaOfLength(10), null, false, ShardRoutingState.INITIALIZING, initializingId)); } + + builder.addShard(primaryShard); + return builder.build(); } @@ -104,7 +114,9 @@ public void testGlobalCheckpointUpdate() { // it is however nice not to assume this on this level and check we do the right thing. 
final long minLocalCheckpoint = allocations.values().stream().min(Long::compare).orElse(UNASSIGNED_SEQ_NO); - final GlobalCheckpointTracker tracker = newTracker(active.iterator().next()); + + final AllocationId primaryId = active.iterator().next(); + final GlobalCheckpointTracker tracker = newTracker(primaryId); assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); logger.info("--> using allocations"); @@ -120,7 +132,7 @@ public void testGlobalCheckpointUpdate() { logger.info(" - [{}], local checkpoint [{}], [{}]", aId, allocations.get(aId), type); }); - tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing), emptySet()); + tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId), emptySet()); tracker.activatePrimaryMode(NO_OPS_PERFORMED); initializing.forEach(aId -> markAllocationIdAsInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED)); allocations.keySet().forEach(aId -> tracker.updateLocalCheckpoint(aId.getId(), allocations.get(aId))); @@ -140,12 +152,12 @@ public void testGlobalCheckpointUpdate() { // first check that adding it without the master blessing doesn't change anything. 
tracker.updateLocalCheckpoint(extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4)); - assertNull(tracker.localCheckpoints.get(extraId)); + assertNull(tracker.checkpoints.get(extraId)); expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(extraId.getId())); Set newInitializing = new HashSet<>(initializing); newInitializing.add(extraId); - tracker.updateFromMaster(initialClusterStateVersion + 1, ids(active), routingTable(newInitializing), emptySet()); + tracker.updateFromMaster(initialClusterStateVersion + 1, ids(active), routingTable(newInitializing, primaryId), emptySet()); tracker.initiateTracking(extraId.getId()); @@ -167,9 +179,9 @@ public void testMissingActiveIdsPreventAdvance() { final Map assigned = new HashMap<>(); assigned.putAll(active); assigned.putAll(initializing); - AllocationId primary = active.keySet().iterator().next(); - final GlobalCheckpointTracker tracker = newTracker(primary); - tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet()), emptySet()); + AllocationId primaryId = active.keySet().iterator().next(); + final GlobalCheckpointTracker tracker = newTracker(primaryId); + tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId), emptySet()); tracker.activatePrimaryMode(NO_OPS_PERFORMED); randomSubsetOf(initializing.keySet()).forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); final AllocationId missingActiveID = randomFrom(active.keySet()); @@ -179,7 +191,7 @@ public void testMissingActiveIdsPreventAdvance() { .filter(e -> !e.getKey().equals(missingActiveID)) .forEach(e -> tracker.updateLocalCheckpoint(e.getKey().getId(), e.getValue())); - if (missingActiveID.equals(primary) == false) { + if (missingActiveID.equals(primaryId) == false) { assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); } // now update all knowledge of all shards @@ -192,9 
+204,9 @@ public void testMissingInSyncIdsPreventAdvance() { final Map initializing = randomAllocationsWithLocalCheckpoints(2, 5); logger.info("active: {}, initializing: {}", active, initializing); - AllocationId primary = active.keySet().iterator().next(); - final GlobalCheckpointTracker tracker = newTracker(primary); - tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet()), emptySet()); + AllocationId primaryId = active.keySet().iterator().next(); + final GlobalCheckpointTracker tracker = newTracker(primaryId); + tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId), emptySet()); tracker.activatePrimaryMode(NO_OPS_PERFORMED); randomSubsetOf(randomIntBetween(1, initializing.size() - 1), initializing.keySet()).forEach(aId -> markAllocationIdAsInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED)); @@ -212,8 +224,9 @@ public void testInSyncIdsAreIgnoredIfNotValidatedByMaster() { final Map active = randomAllocationsWithLocalCheckpoints(1, 5); final Map initializing = randomAllocationsWithLocalCheckpoints(1, 5); final Map nonApproved = randomAllocationsWithLocalCheckpoints(1, 5); - final GlobalCheckpointTracker tracker = newTracker(active.keySet().iterator().next()); - tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet()), emptySet()); + final AllocationId primaryId = active.keySet().iterator().next(); + final GlobalCheckpointTracker tracker = newTracker(primaryId); + tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId), emptySet()); tracker.activatePrimaryMode(NO_OPS_PERFORMED); initializing.keySet().forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); nonApproved.keySet().forEach(k -> @@ -235,6 +248,10 @@ public void testInSyncIdsAreRemovedIfNotValidatedByMaster() { final Set active = 
Sets.union(activeToStay.keySet(), activeToBeRemoved.keySet()); final Set initializing = Sets.union(initializingToStay.keySet(), initializingToBeRemoved.keySet()); final Map allocations = new HashMap<>(); + final AllocationId primaryId = active.iterator().next(); + if (activeToBeRemoved.containsKey(primaryId)) { + activeToStay.put(primaryId, activeToBeRemoved.remove(primaryId)); + } allocations.putAll(activeToStay); if (randomBoolean()) { allocations.putAll(activeToBeRemoved); @@ -243,8 +260,8 @@ public void testInSyncIdsAreRemovedIfNotValidatedByMaster() { if (randomBoolean()) { allocations.putAll(initializingToBeRemoved); } - final GlobalCheckpointTracker tracker = newTracker(active.iterator().next()); - tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing), emptySet()); + final GlobalCheckpointTracker tracker = newTracker(primaryId); + tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId), emptySet()); tracker.activatePrimaryMode(NO_OPS_PERFORMED); if (randomBoolean()) { initializingToStay.keySet().forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); @@ -257,13 +274,19 @@ public void testInSyncIdsAreRemovedIfNotValidatedByMaster() { // now remove shards if (randomBoolean()) { - tracker.updateFromMaster(initialClusterStateVersion + 1, ids(activeToStay.keySet()), routingTable(initializingToStay.keySet()), - emptySet()); + tracker.updateFromMaster( + initialClusterStateVersion + 1, + ids(activeToStay.keySet()), + routingTable(initializingToStay.keySet(), primaryId), + emptySet()); allocations.forEach((aid, ckp) -> tracker.updateLocalCheckpoint(aid.getId(), ckp + 10L)); } else { allocations.forEach((aid, ckp) -> tracker.updateLocalCheckpoint(aid.getId(), ckp + 10L)); - tracker.updateFromMaster(initialClusterStateVersion + 2, ids(activeToStay.keySet()), routingTable(initializingToStay.keySet()), - emptySet()); + tracker.updateFromMaster( + 
initialClusterStateVersion + 2, + ids(activeToStay.keySet()), + routingTable(initializingToStay.keySet(), primaryId), + emptySet()); } final long checkpoint = Stream.concat(activeToStay.values().stream(), initializingToStay.values().stream()) @@ -281,7 +304,7 @@ public void testWaitForAllocationIdToBeInSync() throws Exception { final AllocationId trackingAllocationId = AllocationId.newInitializing(); final GlobalCheckpointTracker tracker = newTracker(inSyncAllocationId); tracker.updateFromMaster(randomNonNegativeLong(), Collections.singleton(inSyncAllocationId.getId()), - routingTable(Collections.singleton(trackingAllocationId)), emptySet()); + routingTable(Collections.singleton(trackingAllocationId), inSyncAllocationId), emptySet()); tracker.activatePrimaryMode(globalCheckpoint); final Thread thread = new Thread(() -> { try { @@ -337,7 +360,7 @@ public void testWaitForAllocationIdToBeInSyncCanBeInterrupted() throws BrokenBar final AllocationId trackingAllocationId = AllocationId.newInitializing(); final GlobalCheckpointTracker tracker = newTracker(inSyncAllocationId); tracker.updateFromMaster(randomNonNegativeLong(), Collections.singleton(inSyncAllocationId.getId()), - routingTable(Collections.singleton(trackingAllocationId)), emptySet()); + routingTable(Collections.singleton(trackingAllocationId), inSyncAllocationId), emptySet()); tracker.activatePrimaryMode(globalCheckpoint); final Thread thread = new Thread(() -> { try { @@ -382,8 +405,8 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { randomActiveAndInitializingAllocationIds(numberOfActiveAllocationsIds, numberOfInitializingIds); final Set activeAllocationIds = activeAndInitializingAllocationIds.v1(); final Set initializingIds = activeAndInitializingAllocationIds.v2(); - IndexShardRoutingTable routingTable = routingTable(initializingIds); AllocationId primaryId = activeAllocationIds.iterator().next(); + IndexShardRoutingTable routingTable = routingTable(initializingIds, primaryId); final 
GlobalCheckpointTracker tracker = newTracker(primaryId); tracker.updateFromMaster(initialClusterStateVersion, ids(activeAllocationIds), routingTable, emptySet()); tracker.activatePrimaryMode(NO_OPS_PERFORMED); @@ -408,14 +431,14 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { // now we will remove some allocation IDs from these and ensure that they propagate through final Set removingActiveAllocationIds = new HashSet<>(randomSubsetOf(activeAllocationIds)); + removingActiveAllocationIds.remove(primaryId); final Set newActiveAllocationIds = activeAllocationIds.stream().filter(a -> !removingActiveAllocationIds.contains(a)).collect(Collectors.toSet()); final List removingInitializingAllocationIds = randomSubsetOf(initializingIds); final Set newInitializingAllocationIds = initializingIds.stream().filter(a -> !removingInitializingAllocationIds.contains(a)).collect(Collectors.toSet()); - routingTable = routingTable(newInitializingAllocationIds); - tracker.updateFromMaster(initialClusterStateVersion + 1, ids(newActiveAllocationIds), routingTable, - emptySet()); + routingTable = routingTable(newInitializingAllocationIds, primaryId); + tracker.updateFromMaster(initialClusterStateVersion + 1, ids(newActiveAllocationIds), routingTable, emptySet()); assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); assertTrue(removingActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()) == null)); assertTrue(newInitializingAllocationIds.stream().noneMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); @@ -429,8 +452,11 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { * than we have been using above ensures that we can not collide with a previous allocation ID */ newInitializingAllocationIds.add(AllocationId.newInitializing()); - tracker.updateFromMaster(initialClusterStateVersion + 2, ids(newActiveAllocationIds), 
routingTable(newInitializingAllocationIds), - emptySet()); + tracker.updateFromMaster( + initialClusterStateVersion + 2, + ids(newActiveAllocationIds), + routingTable(newInitializingAllocationIds, primaryId), + emptySet()); assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); assertTrue( newActiveAllocationIds @@ -473,8 +499,11 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { // using a different length than we have been using above ensures that we can not collide with a previous allocation ID final AllocationId newSyncingAllocationId = AllocationId.newInitializing(); newInitializingAllocationIds.add(newSyncingAllocationId); - tracker.updateFromMaster(initialClusterStateVersion + 3, ids(newActiveAllocationIds), routingTable(newInitializingAllocationIds), - emptySet()); + tracker.updateFromMaster( + initialClusterStateVersion + 3, + ids(newActiveAllocationIds), + routingTable(newInitializingAllocationIds, primaryId), + emptySet()); final CyclicBarrier barrier = new CyclicBarrier(2); final Thread thread = new Thread(() -> { try { @@ -508,8 +537,11 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { * the in-sync set even if we receive a cluster state update that does not reflect this. 
* */ - tracker.updateFromMaster(initialClusterStateVersion + 4, ids(newActiveAllocationIds), routingTable(newInitializingAllocationIds), - emptySet()); + tracker.updateFromMaster( + initialClusterStateVersion + 4, + ids(newActiveAllocationIds), + routingTable(newInitializingAllocationIds, primaryId), + emptySet()); assertTrue(tracker.getTrackedLocalCheckpointForShard(newSyncingAllocationId.getId()).inSync); assertFalse(tracker.pendingInSync.contains(newSyncingAllocationId.getId())); } @@ -534,8 +566,11 @@ public void testRaceUpdatingGlobalCheckpoint() throws InterruptedException, Brok final int activeLocalCheckpoint = randomIntBetween(0, Integer.MAX_VALUE - 1); final GlobalCheckpointTracker tracker = newTracker(active); - tracker.updateFromMaster(randomNonNegativeLong(), Collections.singleton(active.getId()), - routingTable(Collections.singleton(initializing)), emptySet()); + tracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(active.getId()), + routingTable(Collections.singleton(initializing), active), + emptySet()); tracker.activatePrimaryMode(activeLocalCheckpoint); final int nextActiveLocalCheckpoint = randomIntBetween(activeLocalCheckpoint + 1, Integer.MAX_VALUE); final Thread activeThread = new Thread(() -> { @@ -583,20 +618,23 @@ public void testPrimaryContextHandoff() throws IOException { final ShardId shardId = new ShardId("test", "_na_", 0); FakeClusterState clusterState = initialState(); + final AllocationId primaryAllocationId = clusterState.routingTable.primaryShard().allocationId(); GlobalCheckpointTracker oldPrimary = - new GlobalCheckpointTracker(shardId, randomFrom(ids(clusterState.inSyncIds)), indexSettings, UNASSIGNED_SEQ_NO); + new GlobalCheckpointTracker(shardId, primaryAllocationId.getId(), indexSettings, UNASSIGNED_SEQ_NO); GlobalCheckpointTracker newPrimary = - new GlobalCheckpointTracker(shardId, UUIDs.randomBase64UUID(random()), indexSettings, UNASSIGNED_SEQ_NO); + new GlobalCheckpointTracker(shardId, 
primaryAllocationId.getRelocationId(), indexSettings, UNASSIGNED_SEQ_NO); + + Set allocationIds = new HashSet<>(Arrays.asList(oldPrimary.shardAllocationId, newPrimary.shardAllocationId)); clusterState.apply(oldPrimary); clusterState.apply(newPrimary); - activatePrimary(clusterState, oldPrimary); + activatePrimary(oldPrimary); final int numUpdates = randomInt(10); for (int i = 0; i < numUpdates; i++) { if (rarely()) { - clusterState = randomUpdateClusterState(clusterState); + clusterState = randomUpdateClusterState(allocationIds, clusterState); clusterState.apply(oldPrimary); clusterState.apply(newPrimary); } @@ -608,12 +646,18 @@ public void testPrimaryContextHandoff() throws IOException { } } + // simulate transferring the global checkpoint to the new primary after finalizing recovery before the handoff + markAllocationIdAsInSyncQuietly( + oldPrimary, + newPrimary.shardAllocationId, + Math.max(SequenceNumbers.NO_OPS_PERFORMED, oldPrimary.getGlobalCheckpoint() + randomInt(5))); + oldPrimary.updateGlobalCheckpointForShard(newPrimary.shardAllocationId, oldPrimary.getGlobalCheckpoint()); GlobalCheckpointTracker.PrimaryContext primaryContext = oldPrimary.startRelocationHandoff(); if (randomBoolean()) { // cluster state update after primary context handoff if (randomBoolean()) { - clusterState = randomUpdateClusterState(clusterState); + clusterState = randomUpdateClusterState(allocationIds, clusterState); clusterState.apply(oldPrimary); clusterState.apply(newPrimary); } @@ -622,7 +666,7 @@ public void testPrimaryContextHandoff() throws IOException { oldPrimary.abortRelocationHandoff(); if (rarely()) { - clusterState = randomUpdateClusterState(clusterState); + clusterState = randomUpdateClusterState(allocationIds, clusterState); clusterState.apply(oldPrimary); clusterState.apply(newPrimary); } @@ -642,11 +686,10 @@ public void testPrimaryContextHandoff() throws IOException { primaryContext.writeTo(output); StreamInput streamInput = output.bytes().streamInput(); 
primaryContext = new GlobalCheckpointTracker.PrimaryContext(streamInput); - switch (randomInt(3)) { case 0: { // apply cluster state update on old primary while primary context is being transferred - clusterState = randomUpdateClusterState(clusterState); + clusterState = randomUpdateClusterState(allocationIds, clusterState); clusterState.apply(oldPrimary); // activate new primary newPrimary.activateWithPrimaryContext(primaryContext); @@ -656,7 +699,7 @@ public void testPrimaryContextHandoff() throws IOException { } case 1: { // apply cluster state update on new primary while primary context is being transferred - clusterState = randomUpdateClusterState(clusterState); + clusterState = randomUpdateClusterState(allocationIds, clusterState); clusterState.apply(newPrimary); // activate new primary newPrimary.activateWithPrimaryContext(primaryContext); @@ -666,7 +709,7 @@ public void testPrimaryContextHandoff() throws IOException { } case 2: { // apply cluster state update on both copies while primary context is being transferred - clusterState = randomUpdateClusterState(clusterState); + clusterState = randomUpdateClusterState(allocationIds, clusterState); clusterState.apply(oldPrimary); clusterState.apply(newPrimary); newPrimary.activateWithPrimaryContext(primaryContext); @@ -682,8 +725,32 @@ public void testPrimaryContextHandoff() throws IOException { assertTrue(oldPrimary.primaryMode); assertTrue(newPrimary.primaryMode); assertThat(newPrimary.appliedClusterStateVersion, equalTo(oldPrimary.appliedClusterStateVersion)); - assertThat(newPrimary.localCheckpoints, equalTo(oldPrimary.localCheckpoints)); - assertThat(newPrimary.globalCheckpoint, equalTo(oldPrimary.globalCheckpoint)); + /* + * We can not assert on shared knowledge of the global checkpoint between the old primary and the new primary as the new primary + * will update its global checkpoint state without the old primary learning of it, and the old primary could have updated its + * global checkpoint state after 
the primary context was transferred. + */ + Map oldPrimaryCheckpointsCopy = new HashMap<>(oldPrimary.checkpoints); + oldPrimaryCheckpointsCopy.remove(oldPrimary.shardAllocationId); + oldPrimaryCheckpointsCopy.remove(newPrimary.shardAllocationId); + Map newPrimaryCheckpointsCopy = new HashMap<>(newPrimary.checkpoints); + newPrimaryCheckpointsCopy.remove(oldPrimary.shardAllocationId); + newPrimaryCheckpointsCopy.remove(newPrimary.shardAllocationId); + assertThat(newPrimaryCheckpointsCopy, equalTo(oldPrimaryCheckpointsCopy)); + // we can however assert that shared knowledge of the local checkpoint and in-sync status is equal + assertThat( + oldPrimary.checkpoints.get(oldPrimary.shardAllocationId).localCheckpoint, + equalTo(newPrimary.checkpoints.get(oldPrimary.shardAllocationId).localCheckpoint)); + assertThat( + oldPrimary.checkpoints.get(newPrimary.shardAllocationId).localCheckpoint, + equalTo(newPrimary.checkpoints.get(newPrimary.shardAllocationId).localCheckpoint)); + assertThat( + oldPrimary.checkpoints.get(oldPrimary.shardAllocationId).inSync, + equalTo(newPrimary.checkpoints.get(oldPrimary.shardAllocationId).inSync)); + assertThat( + oldPrimary.checkpoints.get(newPrimary.shardAllocationId).inSync, + equalTo(newPrimary.checkpoints.get(newPrimary.shardAllocationId).inSync)); + assertThat(newPrimary.getGlobalCheckpoint(), equalTo(oldPrimary.getGlobalCheckpoint())); assertThat(newPrimary.routingTable, equalTo(oldPrimary.routingTable)); assertThat(newPrimary.replicationGroup, equalTo(oldPrimary.replicationGroup)); @@ -696,7 +763,7 @@ public void testIllegalStateExceptionIfUnknownAllocationId() { final AllocationId initializing = AllocationId.newInitializing(); final GlobalCheckpointTracker tracker = newTracker(active); tracker.updateFromMaster(randomNonNegativeLong(), Collections.singleton(active.getId()), - routingTable(Collections.singleton(initializing)), emptySet()); + routingTable(Collections.singleton(initializing), active), emptySet()); 
tracker.activatePrimaryMode(NO_OPS_PERFORMED); expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(randomAlphaOfLength(10))); @@ -733,38 +800,58 @@ private static FakeClusterState initialState() { final int numberOfActiveAllocationsIds = randomIntBetween(1, 8); final int numberOfInitializingIds = randomIntBetween(0, 8); final Tuple, Set> activeAndInitializingAllocationIds = - randomActiveAndInitializingAllocationIds(numberOfActiveAllocationsIds, numberOfInitializingIds); + randomActiveAndInitializingAllocationIds(numberOfActiveAllocationsIds, numberOfInitializingIds); final Set activeAllocationIds = activeAndInitializingAllocationIds.v1(); final Set initializingAllocationIds = activeAndInitializingAllocationIds.v2(); - return new FakeClusterState(initialClusterStateVersion, activeAllocationIds, routingTable(initializingAllocationIds)); + final AllocationId primaryId = randomFrom(activeAllocationIds); + final AllocationId relocatingId = AllocationId.newRelocation(primaryId); + activeAllocationIds.remove(primaryId); + activeAllocationIds.add(relocatingId); + final ShardId shardId = new ShardId("test", "_na_", 0); + final ShardRouting primaryShard = + TestShardRouting.newShardRouting( + shardId, randomAlphaOfLength(10), randomAlphaOfLength(10), true, ShardRoutingState.RELOCATING, relocatingId); + + return new FakeClusterState( + initialClusterStateVersion, + activeAllocationIds, + routingTable(initializingAllocationIds, primaryShard)); } - private static void activatePrimary(FakeClusterState clusterState, GlobalCheckpointTracker gcp) { + private static void activatePrimary(GlobalCheckpointTracker gcp) { gcp.activatePrimaryMode(randomIntBetween(Math.toIntExact(NO_OPS_PERFORMED), 10)); } private static void randomLocalCheckpointUpdate(GlobalCheckpointTracker gcp) { - String allocationId = randomFrom(gcp.localCheckpoints.keySet()); - long currentLocalCheckpoint = gcp.localCheckpoints.get(allocationId).getLocalCheckpoint(); + String allocationId = 
randomFrom(gcp.checkpoints.keySet()); + long currentLocalCheckpoint = gcp.checkpoints.get(allocationId).getLocalCheckpoint(); gcp.updateLocalCheckpoint(allocationId, Math.max(SequenceNumbers.NO_OPS_PERFORMED, currentLocalCheckpoint + randomInt(5))); } private static void randomMarkInSync(GlobalCheckpointTracker gcp) { - String allocationId = randomFrom(gcp.localCheckpoints.keySet()); + String allocationId = randomFrom(gcp.checkpoints.keySet()); long newLocalCheckpoint = Math.max(NO_OPS_PERFORMED, gcp.getGlobalCheckpoint() + randomInt(5)); markAllocationIdAsInSyncQuietly(gcp, allocationId, newLocalCheckpoint); } - private static FakeClusterState randomUpdateClusterState(FakeClusterState clusterState) { - final Set initializingIdsToAdd = randomAllocationIdsExcludingExistingIds(clusterState.allIds(), randomInt(2)); + private static FakeClusterState randomUpdateClusterState(Set allocationIds, FakeClusterState clusterState) { + final Set initializingIdsToAdd = + randomAllocationIdsExcludingExistingIds(exclude(clusterState.allIds(), allocationIds), randomInt(2)); final Set initializingIdsToRemove = new HashSet<>( - randomSubsetOf(randomInt(clusterState.initializingIds().size()), clusterState.initializingIds())); + exclude(randomSubsetOf(randomInt(clusterState.initializingIds().size()), clusterState.initializingIds()), allocationIds)); final Set inSyncIdsToRemove = new HashSet<>( - randomSubsetOf(randomInt(clusterState.inSyncIds.size()), clusterState.inSyncIds)); + exclude(randomSubsetOf(randomInt(clusterState.inSyncIds.size()), clusterState.inSyncIds), allocationIds)); final Set remainingInSyncIds = Sets.difference(clusterState.inSyncIds, inSyncIdsToRemove); - return new FakeClusterState(clusterState.version + randomIntBetween(1, 5), - remainingInSyncIds.isEmpty() ? 
clusterState.inSyncIds : remainingInSyncIds, - routingTable(Sets.difference(Sets.union(clusterState.initializingIds(), initializingIdsToAdd), initializingIdsToRemove))); + return new FakeClusterState( + clusterState.version + randomIntBetween(1, 5), + remainingInSyncIds.isEmpty() ? clusterState.inSyncIds : remainingInSyncIds, + routingTable( + Sets.difference(Sets.union(clusterState.initializingIds(), initializingIdsToAdd), initializingIdsToRemove), + clusterState.routingTable.primaryShard())); + } + + private static Set exclude(Collection allocationIds, Set excludeIds) { + return allocationIds.stream().filter(aId -> !excludeIds.contains(aId.getId())).collect(Collectors.toSet()); } private static Tuple, Set> randomActiveAndInitializingAllocationIds( diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index 4eb4f9606aea1..04e1a846bd64f 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.recovery; import com.carrotsearch.hppc.IntHashSet; +import com.carrotsearch.hppc.ObjectLongMap; import com.carrotsearch.hppc.procedures.IntProcedure; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.util.English; @@ -34,6 +35,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -44,12 +46,14 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.NodeEnvironment; 
+import org.elasticsearch.index.IndexService; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; import org.elasticsearch.plugins.Plugin; @@ -118,8 +122,14 @@ protected void beforeIndexDeletion() throws Exception { } ShardStats primary = maybePrimary.get(); final SeqNoStats primarySeqNoStats = primary.getSeqNoStats(); - assertThat(primary.getShardRouting() + " should have set the global checkpoint", + final ShardRouting primaryShardRouting = primary.getShardRouting(); + assertThat(primaryShardRouting + " should have set the global checkpoint", primarySeqNoStats.getGlobalCheckpoint(), not(equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO))); + final DiscoveryNode node = clusterService().state().nodes().get(primaryShardRouting.currentNodeId()); + final IndicesService indicesService = + internalCluster().getInstance(IndicesService.class, node.getName()); + final IndexShard indexShard = indicesService.getShardOrNull(primaryShardRouting.shardId()); + final ObjectLongMap globalCheckpoints = indexShard.getGlobalCheckpoints(); for (ShardStats shardStats : indexShardStats) { final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); assertThat(shardStats.getShardRouting() + " local checkpoint mismatch", @@ -128,6 +138,10 @@ protected void beforeIndexDeletion() throws Exception { seqNoStats.getGlobalCheckpoint(), equalTo(primarySeqNoStats.getGlobalCheckpoint())); assertThat(shardStats.getShardRouting() + " max seq no mismatch", seqNoStats.getMaxSeqNo(), equalTo(primarySeqNoStats.getMaxSeqNo())); + // the local knowledge on the primary of the 
global checkpoint equals the global checkpoint on the shard + assertThat( + seqNoStats.getGlobalCheckpoint(), + equalTo(globalCheckpoints.get(shardStats.getShardRouting().allocationId().getId()))); } } } From 52e80a9292eedf3e1d37706e6e0b1c82484557c9 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 18 Sep 2017 06:28:50 -0400 Subject: [PATCH 52/67] Reenable BWC tests after disabling for backport This commit reenables the BWC tests after they were disabled for backporting the change to track global checkpoints of shard copies on the primary. Relates #26666 --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 7b1e517a8586b..cfc8401a934e0 100644 --- a/build.gradle +++ b/build.gradle @@ -186,7 +186,7 @@ task verifyVersions { * after the backport of the backcompat code is complete. */ allprojects { - ext.bwc_tests_enabled = false + ext.bwc_tests_enabled = true } task verifyBwcTestsEnabled { From 5dd476feb5cd9f96f7f517dd50ce5bac6df101cb Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 18 Sep 2017 06:43:02 -0400 Subject: [PATCH 53/67] Skip bad request REST test on pre-6.0 This commit adds a skip for the bad request REST test on pre-6.0 nodes. Previously, a request for /_(.*) where $1 is not an existing endpoint would return a 404. This is because the request would be treated as a get index request for an index named _$1. However, an index can never start with "_" so logic was added to detect this and return a 400 instead as this should be treated as a bad request. During the mixed-cluster BWC tests, a node running pre-6.0 code will still return a 404 though. Therefore, this test needs to skipped in such a mixed-cluster scenario. 
--- .../main/resources/rest-api-spec/test/indices.get/10_basic.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml index 943cbcf65d144..e30af208aeb85 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml @@ -162,6 +162,9 @@ setup: --- "Should return an exception when querying invalid indices": + - skip: + version: " - 5.99.99" + reason: "bad request logic added in 6.0.0" - do: catch: bad_request From 6f25163aef7048896304ac4943054a030dafa694 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 18 Sep 2017 06:51:22 -0400 Subject: [PATCH 54/67] Filter pre-6.0 nodes for checkpoint invariants When checking that the global checkpoint on the primary is consistent with the local checkpoints of the in-sync shards, we have to filter pre-6.0 nodes from the check or the invariant will trivially trip. This commit filters these nodes out when checking this invariant. Relates #26666 --- .../org/elasticsearch/index/seqno/GlobalCheckpointTracker.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java index 416d6abdcb4cd..ee1f2d7c8715a 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java @@ -316,7 +316,7 @@ private static long inSyncCheckpointStates( .stream() .filter(cps -> cps.inSync) .mapToLong(function) - .filter(v -> v != SequenceNumbers.UNASSIGNED_SEQ_NO)); + .filter(v -> v != SequenceNumbersService.PRE_60_NODE_CHECKPOINT && v != SequenceNumbers.UNASSIGNED_SEQ_NO)); return value.isPresent() ? 
value.getAsLong() : SequenceNumbers.UNASSIGNED_SEQ_NO; } From 23093adcb98c74952c9bf46f1a282e332fd857c9 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 18 Sep 2017 07:48:08 -0400 Subject: [PATCH 55/67] Update global checkpoint with permit after recovery After recovery completes from a primary, we now update the local knowledge on the primary of the global checkpoint on the recovery target. However if this occurs concurrently with a relocation, an assertion could trip that we are no longer in primary mode. As this local knowledge should only be tracked when we are in primary mode, updating this local knowledge should be done under a permit. This commit causes that to be the case. Relates #26666 --- .../elasticsearch/indices/recovery/RecoverySourceHandler.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 01d971ba32599..c717e29353b65 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -477,7 +477,7 @@ public void finalizeRecovery(final long targetLocalCheckpoint) { runUnderPrimaryPermit(() -> shard.markAllocationIdAsInSync(request.targetAllocationId(), targetLocalCheckpoint)); final long globalCheckpoint = shard.getGlobalCheckpoint(); cancellableThreads.execute(() -> recoveryTarget.finalizeRecovery(globalCheckpoint)); - shard.updateGlobalCheckpointForShard(request.targetAllocationId(), globalCheckpoint); + runUnderPrimaryPermit(() -> shard.updateGlobalCheckpointForShard(request.targetAllocationId(), globalCheckpoint)); if (request.isPrimaryRelocation()) { logger.trace("performing relocation hand-off"); From 9f97f9072aa65f939af1b458dc46b98e93d08409 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 18 Sep 2017 17:52:36 +0200 Subject: [PATCH 
56/67] Allow `InputStreamStreamInput` array size validation where applicable (#26692) Today we can't validate the array length in `InputStreamStreamInput` since we can't rely on `InputStream.available` yet in some situations we know the size of the stream and can apply additional validation. --- .../io/stream/InputStreamStreamInput.java | 23 ++++++++++++++++++- .../common/io/stream/StreamInput.java | 2 +- .../index/translog/TranslogReader.java | 3 ++- .../common/io/stream/StreamTests.java | 17 ++++++++++++++ .../percolator/PercolateQueryBuilder.java | 3 ++- 5 files changed, 44 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java index 6d952b01a21e3..5999427e1a206 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java @@ -28,9 +28,28 @@ public class InputStreamStreamInput extends StreamInput { private final InputStream is; + private final long sizeLimit; + /** + * Creates a new InputStreamStreamInput with unlimited size + * @param is the input stream to wrap + */ public InputStreamStreamInput(InputStream is) { + this(is, Long.MAX_VALUE); + } + + /** + * Creates a new InputStreamStreamInput with a size limit + * @param is the input stream to wrap + * @param sizeLimit a hard limit of the number of bytes in the given input stream. This is used for internal input validation + */ + public InputStreamStreamInput(InputStream is, long sizeLimit) { this.is = is; + if (sizeLimit < 0) { + throw new IllegalArgumentException("size limit must be positive"); + } + this.sizeLimit = sizeLimit; + } @Override @@ -98,6 +117,8 @@ public long skip(long n) throws IOException { @Override protected void ensureCanReadBytes(int length) throws EOFException { - // TODO what can we do here? 
+ if (length > sizeLimit) { + throw new EOFException("tried to read: " + length + " bytes but this stream is limited to: " + sizeLimit); + } } } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index ac627cfd95d7f..31f53874f1949 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -928,7 +928,7 @@ public static StreamInput wrap(byte[] bytes) { } public static StreamInput wrap(byte[] bytes, int offset, int length) { - return new InputStreamStreamInput(new ByteArrayInputStream(bytes, offset, length)); + return new InputStreamStreamInput(new ByteArrayInputStream(bytes, offset, length), length); } /** diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java index 46439afead10a..b88037c32fd59 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java @@ -79,7 +79,8 @@ public static TranslogReader open( final FileChannel channel, final Path path, final Checkpoint checkpoint, final String translogUUID) throws IOException { try { - InputStreamStreamInput headerStream = new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel)); // don't close + InputStreamStreamInput headerStream = new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel), + channel.size()); // don't close // Lucene's CodecUtil writes a magic number of 0x3FD76C17 with the // header, in binary this looks like: // diff --git a/core/src/test/java/org/elasticsearch/common/io/stream/StreamTests.java b/core/src/test/java/org/elasticsearch/common/io/stream/StreamTests.java index 9d885fe131c7a..d64dece7867aa 100644 --- 
a/core/src/test/java/org/elasticsearch/common/io/stream/StreamTests.java +++ b/core/src/test/java/org/elasticsearch/common/io/stream/StreamTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.ByteArrayInputStream; +import java.io.EOFException; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -192,6 +193,22 @@ public void testInputStreamStreamInputDelegatesAvailable() throws IOException { assertEquals(streamInput.available(), length - bytesToRead); } + public void testReadArraySize() throws IOException { + BytesStreamOutput stream = new BytesStreamOutput(); + byte[] array = new byte[randomIntBetween(1, 10)]; + for (int i = 0; i < array.length; i++) { + array[i] = randomByte(); + } + stream.writeByteArray(array); + InputStreamStreamInput streamInput = new InputStreamStreamInput(StreamInput.wrap(BytesReference.toBytes(stream.bytes())), array + .length-1); + expectThrows(EOFException.class, streamInput::readByteArray); + streamInput = new InputStreamStreamInput(StreamInput.wrap(BytesReference.toBytes(stream.bytes())), BytesReference.toBytes(stream + .bytes()).length); + + assertArrayEquals(array, streamInput.readByteArray()); + } + public void testWritableArrays() throws IOException { final String[] strings = generateRandomStringArray(10, 10, false, true); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 337b7ee2f36b5..db1b444dcd28e 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -705,7 +705,8 @@ static PercolateQuery.QueryStore createStore(MappedFieldType queryBuilderFieldTy if (binaryDocValues.advanceExact(docId)) { BytesRef qbSource = binaryDocValues.binaryValue(); try (InputStream in = new 
ByteArrayInputStream(qbSource.bytes, qbSource.offset, qbSource.length)) { - try (StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), registry)) { + try (StreamInput input = new NamedWriteableAwareStreamInput( + new InputStreamStreamInput(in, qbSource.length), registry)) { input.setVersion(indexVersion); // Query builder's content is stored via BinaryFieldMapper, which has a custom encoding // to encode multiple binary values into a single binary doc values field. From 98f8bde38963050b282486557b5805dc3e1ffdb2 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 18 Sep 2017 14:56:03 -0400 Subject: [PATCH 57/67] Handle release of 5.6.1 * Add a version constant for 5.6.2 so that the 5.6.1 constant represents the 5.6.1 release and the 5.6.2 constant represents the unreleased 5.6 branch. --- core/src/main/java/org/elasticsearch/Version.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index b422345aa426c..00059c96dddbe 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -93,8 +93,11 @@ public class Version implements Comparable { public static final int V_5_6_0_ID = 5060099; public static final Version V_5_6_0 = new Version(V_5_6_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); public static final int V_5_6_1_ID = 5060199; - // use proper Lucene constant once we are on a Lucene snapshot that knows about 6.6.1 + // TODO use proper Lucene constant once we are on a Lucene snapshot that knows about 6.6.1 public static final Version V_5_6_1 = new Version(V_5_6_1_ID, org.apache.lucene.util.Version.fromBits(6, 6, 1)); + public static final int V_5_6_2_ID = 5060299; + // TODO use proper Lucene constant once we are on a Lucene snapshot that knows about 6.6.1 + public static final Version V_5_6_2 = new Version(V_5_6_2_ID, 
org.apache.lucene.util.Version.fromBits(6, 6, 1)); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); @@ -145,6 +148,8 @@ public static Version fromId(int id) { return V_6_0_0_alpha2; case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; + case V_5_6_2_ID: + return V_5_6_2; case V_5_6_1_ID: return V_5_6_1; case V_5_6_0_ID: From cc726cb3b63a2257df9dcd88ec3a5f0318986e17 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Mon, 18 Sep 2017 13:19:34 -0700 Subject: [PATCH 58/67] convert more admin requests to writeable (#26566) --- .../get/GetRepositoriesRequest.java | 20 +++--- .../get/TransportGetRepositoriesAction.java | 2 +- .../shards/ClusterSearchShardsRequest.java | 72 ++++++++++--------- .../TransportClusterSearchShardsAction.java | 2 +- .../cluster/state/ClusterStateRequest.java | 44 ++++++------ .../state/TransportClusterStateAction.java | 2 +- .../storedscripts/GetStoredScriptRequest.java | 39 +++++----- .../TransportGetStoredScriptAction.java | 2 +- .../tasks/PendingClusterTasksRequest.java | 10 +++ .../TransportPendingClusterTasksAction.java | 2 +- .../exists/TransportAliasesExistAction.java | 2 +- .../indices/alias/get/GetAliasesRequest.java | 28 ++++---- .../alias/get/TransportGetAliasesAction.java | 2 +- .../exists/indices/IndicesExistsRequest.java | 24 ++++--- .../indices/TransportIndicesExistsAction.java | 2 +- .../types/TransportTypesExistsAction.java | 2 +- .../exists/types/TypesExistsRequest.java | 28 ++++---- .../admin/indices/get/GetIndexRequest.java | 22 ++++-- .../indices/get/TransportGetIndexAction.java | 2 +- .../mapping/get/GetMappingsRequest.java | 10 +++ .../get/TransportGetMappingsAction.java | 2 +- .../settings/get/GetSettingsRequest.java | 35 +++++---- .../get/TransportGetSettingsAction.java | 2 +- .../shards/IndicesShardStoresRequest.java | 42 ++++++----- .../TransportIndicesShardStoresAction.java | 2 +- 
.../get/GetIndexTemplatesRequest.java | 20 +++--- .../get/TransportGetIndexTemplatesAction.java | 2 +- .../action/ingest/GetPipelineRequest.java | 20 +++--- .../ingest/GetPipelineTransportAction.java | 2 +- .../support/master/MasterNodeReadRequest.java | 5 +- .../master/info/ClusterInfoRequest.java | 28 +++++--- .../info/TransportClusterInfoAction.java | 5 +- .../ClusterSearchShardsRequestTests.java | 3 +- .../state/ClusterStateRequestTests.java | 3 +- .../GetStoredScriptRequestTests.java | 3 +- .../TransportClientNodesServiceTests.java | 2 +- .../RemoteClusterConnectionTests.java | 4 +- 37 files changed, 288 insertions(+), 209 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java index a0e6de916ff07..9c2e2b80a6d28 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java @@ -51,6 +51,17 @@ public GetRepositoriesRequest(String[] repositories) { this.repositories = repositories; } + public GetRepositoriesRequest(StreamInput in) throws IOException { + super(in); + repositories = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(repositories); + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -85,13 +96,6 @@ public GetRepositoriesRequest repositories(String[] repositories) { @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - repositories = in.readStringArray(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(repositories); + throw new 
UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index 490d20f086cc4..6e7a0ca5cf49c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -51,7 +51,7 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadActio @Inject public TransportGetRepositoriesAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, GetRepositoriesAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, GetRepositoriesRequest::new); + super(settings, GetRepositoriesAction.NAME, transportService, clusterService, threadPool, actionFilters, GetRepositoriesRequest::new, indexNameExpressionResolver); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index df38690b790a4..d8dfd71530922 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -49,6 +49,42 @@ public ClusterSearchShardsRequest(String... 
indices) { indices(indices); } + public ClusterSearchShardsRequest(StreamInput in) throws IOException { + super(in); + indices = new String[in.readVInt()]; + for (int i = 0; i < indices.length; i++) { + indices[i] = in.readString(); + } + + routing = in.readOptionalString(); + preference = in.readOptionalString(); + + if (in.getVersion().onOrBefore(Version.V_5_1_1)) { + //types + in.readStringArray(); + } + indicesOptions = IndicesOptions.readIndicesOptions(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + + out.writeVInt(indices.length); + for (String index : indices) { + out.writeString(index); + } + + out.writeOptionalString(routing); + out.writeOptionalString(preference); + + if (out.getVersion().onOrBefore(Version.V_5_1_1)) { + //types + out.writeStringArray(Strings.EMPTY_ARRAY); + } + indicesOptions.writeIndicesOptions(out); + } + @Override public ActionRequestValidationException validate() { return null; @@ -124,40 +160,6 @@ public String preference() { @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - - indices = new String[in.readVInt()]; - for (int i = 0; i < indices.length; i++) { - indices[i] = in.readString(); - } - - routing = in.readOptionalString(); - preference = in.readOptionalString(); - - if (in.getVersion().onOrBefore(Version.V_5_1_1)) { - //types - in.readStringArray(); - } - indicesOptions = IndicesOptions.readIndicesOptions(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - - out.writeVInt(indices.length); - for (String index : indices) { - out.writeString(index); - } - - out.writeOptionalString(routing); - out.writeOptionalString(preference); - - if (out.getVersion().onOrBefore(Version.V_5_1_1)) { - //types - out.writeStringArray(Strings.EMPTY_ARRAY); - } - indicesOptions.writeIndicesOptions(out); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by 
Writeable"); } - } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index 20ed69ae5a92f..9774ecdffba17 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -54,7 +54,7 @@ public TransportClusterSearchShardsAction(Settings settings, TransportService tr IndicesService indicesService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, ClusterSearchShardsAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, ClusterSearchShardsRequest::new); + ClusterSearchShardsRequest::new, indexNameExpressionResolver); this.indicesService = indicesService; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java index e6b468b804b0d..33a20332526bf 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java @@ -42,6 +42,29 @@ public class ClusterStateRequest extends MasterNodeReadRequest { + public PendingClusterTasksRequest() { + } + + public PendingClusterTasksRequest(StreamInput in) throws IOException { + super(in); + } + @Override public ActionRequestValidationException validate() { return null; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index 
cd58bb8d6d43e..542b2dd8badc4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -42,7 +42,7 @@ public class TransportPendingClusterTasksAction extends TransportMasterNodeReadA @Inject public TransportPendingClusterTasksAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, PendingClusterTasksAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PendingClusterTasksRequest::new); + super(settings, PendingClusterTasksAction.NAME, transportService, clusterService, threadPool, actionFilters, PendingClusterTasksRequest::new, indexNameExpressionResolver); this.clusterService = clusterService; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java index 9a104598e8af3..6b77b9a39e97e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java @@ -37,7 +37,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadAction { + public GetMappingsRequest() { + } + + public GetMappingsRequest(StreamInput in) throws IOException { + super(in); + } + @Override public ActionRequestValidationException validate() { return null; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java index 
363e935ca56f0..3189a5a15c24f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java @@ -39,7 +39,7 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction request) { - super(settings, actionName, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, request); + Writeable.Reader request, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, actionName, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); } @Override diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java index e21635596b9e7..d8b9e2f5b5e03 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java @@ -60,8 +60,7 @@ public void testSerialization() throws Exception { request.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { in.setVersion(version); - ClusterSearchShardsRequest deserialized = new ClusterSearchShardsRequest(); - deserialized.readFrom(in); + ClusterSearchShardsRequest deserialized = new ClusterSearchShardsRequest(in); assertArrayEquals(request.indices(), deserialized.indices()); assertSame(request.indicesOptions(), deserialized.indicesOptions()); assertEquals(request.routing(), deserialized.routing()); diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java index b515829b72ac5..8c77ccfef90ce 100644 --- 
a/core/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java @@ -47,8 +47,7 @@ public void testSerialization() throws Exception { StreamInput streamInput = output.bytes().streamInput(); streamInput.setVersion(testVersion); - ClusterStateRequest deserializedCSRequest = new ClusterStateRequest(); - deserializedCSRequest.readFrom(streamInput); + ClusterStateRequest deserializedCSRequest = new ClusterStateRequest(streamInput); assertThat(deserializedCSRequest.routingTable(), equalTo(clusterStateRequest.routingTable())); assertThat(deserializedCSRequest.metaData(), equalTo(clusterStateRequest.metaData())); diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestTests.java index bd12d58b1cbcd..756b7f1e5f688 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestTests.java @@ -38,8 +38,7 @@ public void testGetIndexedScriptRequestSerialization() throws IOException { StreamInput in = out.bytes().streamInput(); in.setVersion(out.getVersion()); - GetStoredScriptRequest request2 = new GetStoredScriptRequest(); - request2.readFrom(in); + GetStoredScriptRequest request2 = new GetStoredScriptRequest(in); assertThat(request2.id(), equalTo(request.id())); } diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java index 9ff6ae06d17e7..e63f9f04617fd 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ 
b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java @@ -340,7 +340,7 @@ public void testSniffNodesSamplerClosesConnections() throws Exception { Settings remoteSettings = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "remote").build(); try (MockTransportService remoteService = createNewService(remoteSettings, Version.CURRENT, threadPool, null)) { final MockHandler handler = new MockHandler(remoteService); - remoteService.registerRequestHandler(ClusterStateAction.NAME, ClusterStateRequest::new, ThreadPool.Names.SAME, handler); + remoteService.registerRequestHandler(ClusterStateAction.NAME, ThreadPool.Names.SAME, ClusterStateRequest::new, handler); remoteService.start(); remoteService.acceptIncomingRequests(); diff --git a/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index d70032ca065f7..856385531d7ec 100644 --- a/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -110,12 +110,12 @@ public static MockTransportService startTransport( ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(s); MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, null); try { - newService.registerRequestHandler(ClusterSearchShardsAction.NAME, ClusterSearchShardsRequest::new, ThreadPool.Names.SAME, + newService.registerRequestHandler(ClusterSearchShardsAction.NAME,ThreadPool.Names.SAME, ClusterSearchShardsRequest::new, (request, channel) -> { channel.sendResponse(new ClusterSearchShardsResponse(new ClusterSearchShardsGroup[0], knownNodes.toArray(new DiscoveryNode[0]), Collections.emptyMap())); }); - newService.registerRequestHandler(ClusterStateAction.NAME, ClusterStateRequest::new, ThreadPool.Names.SAME, + 
newService.registerRequestHandler(ClusterStateAction.NAME, ThreadPool.Names.SAME, ClusterStateRequest::new, (request, channel) -> { DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); for (DiscoveryNode node : knownNodes) { From c3746b268ca9a9e7e26ec861dd05e654c9bace62 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 18 Sep 2017 15:51:07 -0700 Subject: [PATCH 59/67] Separate Painless Whitelist Loading from the Painless Definition (#26540) Adds several small whitelist data structures and a new Whitelist class to separate the idea of loading a whitelist from the actual Painless Definition class. This is the first step of many in allowing users to define custom whitelists per context. Also supports the idea of loading multiple whitelists from different sources for a single context. --- .../elasticsearch/painless/Definition.java | 706 +++++++++--------- .../org/elasticsearch/painless/Whitelist.java | 198 +++++ .../painless/WhitelistLoader.java | 290 +++++++ .../org/elasticsearch/painless/java.lang.txt | 244 +++--- .../org/elasticsearch/painless/java.math.txt | 20 +- .../org/elasticsearch/painless/java.text.txt | 110 +-- .../painless/java.time.chrono.txt | 42 +- .../painless/java.time.format.txt | 20 +- .../painless/java.time.temporal.txt | 22 +- .../org/elasticsearch/painless/java.time.txt | 42 +- .../elasticsearch/painless/java.time.zone.txt | 14 +- .../painless/java.util.function.txt | 5 +- .../painless/java.util.regex.txt | 6 +- .../painless/java.util.stream.txt | 20 +- .../org/elasticsearch/painless/java.util.txt | 418 +++++------ .../org/elasticsearch/painless/joda.time.txt | 4 +- .../painless/org.elasticsearch.txt | 49 +- 17 files changed, 1351 insertions(+), 859 deletions(-) create mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/Whitelist.java create mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/WhitelistLoader.java diff --git 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index 3326554d19313..6589c544af7af 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -22,14 +22,10 @@ import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.LineNumberReader; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; import java.lang.reflect.Modifier; -import java.nio.charset.StandardCharsets; import java.time.LocalDate; import java.util.ArrayList; import java.util.Arrays; @@ -41,6 +37,7 @@ import java.util.Objects; import java.util.PrimitiveIterator; import java.util.Spliterator; +import java.util.Stack; /** * The entire API for Painless. 
Also used as a whitelist for checking for legal @@ -48,26 +45,28 @@ */ public final class Definition { - private static final List DEFINITION_FILES = Collections.unmodifiableList( - Arrays.asList("org.elasticsearch.txt", - "java.lang.txt", - "java.math.txt", - "java.text.txt", - "java.time.txt", - "java.time.chrono.txt", - "java.time.format.txt", - "java.time.temporal.txt", - "java.time.zone.txt", - "java.util.txt", - "java.util.function.txt", - "java.util.regex.txt", - "java.util.stream.txt", - "joda.time.txt")); + private static final String[] DEFINITION_FILES = new String[] { + "org.elasticsearch.txt", + "java.lang.txt", + "java.math.txt", + "java.text.txt", + "java.time.txt", + "java.time.chrono.txt", + "java.time.format.txt", + "java.time.temporal.txt", + "java.time.zone.txt", + "java.util.txt", + "java.util.function.txt", + "java.util.regex.txt", + "java.util.stream.txt", + "joda.time.txt" + }; /** * Whitelist that is "built in" to Painless and required by all scripts. */ - public static final Definition BUILTINS = new Definition(); + public static final Definition BUILTINS = new Definition( + Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, DEFINITION_FILES))); /** Some native types as constants: */ public static final Type VOID_TYPE = BUILTINS.getType("void"); @@ -110,10 +109,10 @@ private Type(final String name, final int dimensions, final boolean dynamic, final Struct struct, final Class clazz, final org.objectweb.asm.Type type) { this.name = name; this.dimensions = dimensions; + this.dynamic = dynamic; this.struct = struct; this.clazz = clazz; this.type = type; - this.dynamic = dynamic; } @Override @@ -523,424 +522,433 @@ static Collection allSimpleTypes() { private final Map structsMap; private final Map simpleTypesMap; - private Definition() { + private Definition(List whitelists) { structsMap = new HashMap<>(); simpleTypesMap = new HashMap<>(); runtimeMap = new HashMap<>(); - // parse the classes and return hierarchy 
(map of class name -> superclasses/interfaces) - Map> hierarchy = addStructs(); - // add every method for each class - addElements(); - // apply hierarchy: this means e.g. copying Object's methods into String (thats how subclasses work) - for (Map.Entry> clazz : hierarchy.entrySet()) { - copyStruct(clazz.getKey(), clazz.getValue()); - } - // if someone declares an interface type, its still an Object - for (Map.Entry clazz : structsMap.entrySet()) { - String name = clazz.getKey(); - Class javaPeer = clazz.getValue().clazz; - if (javaPeer.isInterface()) { - copyStruct(name, Collections.singletonList("Object")); - } else if (name.equals("def") == false && name.equals("Object") == false && javaPeer.isPrimitive() == false) { - // but otherwise, unless its a primitive type, it really should - assert hierarchy.get(name) != null : "class '" + name + "' does not extend Object!"; - assert hierarchy.get(name).contains("Object") : "class '" + name + "' does not extend Object!"; + Map, Struct> javaClassesToPainlessStructs = new HashMap<>(); + String origin = null; + + // add the universal def type + structsMap.put("def", new Struct("def", Object.class, org.objectweb.asm.Type.getType(Object.class))); + + try { + // first iteration collects all the Painless type names that + // are used for validation during the second iteration + for (Whitelist whitelist : whitelists) { + for (Whitelist.Struct whitelistStruct : whitelist.whitelistStructs) { + Struct painlessStruct = structsMap.get(whitelistStruct.painlessTypeName); + + if (painlessStruct != null && painlessStruct.clazz.getName().equals(whitelistStruct.javaClassName) == false) { + throw new IllegalArgumentException("struct [" + painlessStruct.name + "] cannot represent multiple classes " + + "[" + painlessStruct.clazz.getName() + "] and [" + whitelistStruct.javaClassName + "]"); + } + + origin = whitelistStruct.origin; + addStruct(whitelist.javaClassLoader, whitelistStruct); + + painlessStruct = 
structsMap.get(whitelistStruct.painlessTypeName); + javaClassesToPainlessStructs.put(painlessStruct.clazz, painlessStruct); + } } - } - // mark functional interfaces (or set null, to mark class is not) - for (Struct clazz : structsMap.values()) { - clazz.functionalMethod.set(computeFunctionalInterfaceMethod(clazz)); - } - // precompute runtime classes - for (Struct struct : structsMap.values()) { - addRuntimeClass(struct); - } - // copy all structs to make them unmodifiable for outside users: - for (final Map.Entry entry : structsMap.entrySet()) { - entry.setValue(entry.getValue().freeze()); - } - } + // second iteration adds all the constructors, methods, and fields that will + // be available in Painless along with validating they exist and all their types have + // been white-listed during the first iteration + for (Whitelist whitelist : whitelists) { + for (Whitelist.Struct whitelistStruct : whitelist.whitelistStructs) { + for (Whitelist.Constructor whitelistConstructor : whitelistStruct.whitelistConstructors) { + origin = whitelistConstructor.origin; + addConstructor(whitelistStruct.painlessTypeName, whitelistConstructor); + } - /** adds classes from definition. 
returns hierarchy */ - private Map> addStructs() { - final Map> hierarchy = new HashMap<>(); - for (String file : DEFINITION_FILES) { - int currentLine = -1; - try { - try (InputStream stream = Definition.class.getResourceAsStream(file); - LineNumberReader reader = new LineNumberReader(new InputStreamReader(stream, StandardCharsets.UTF_8))) { - String line = null; - while ((line = reader.readLine()) != null) { - currentLine = reader.getLineNumber(); - line = line.trim(); - if (line.length() == 0 || line.charAt(0) == '#') { - continue; - } - if (line.startsWith("class ")) { - String elements[] = line.split("\u0020"); - assert elements[2].equals("->") : "Invalid struct definition [" + String.join(" ", elements) +"]"; - if (elements.length == 7) { - hierarchy.put(elements[1], Arrays.asList(elements[5].split(","))); - } else { - assert elements.length == 5 : "Invalid struct definition [" + String.join(" ", elements) + "]"; - } - String className = elements[1]; - String javaPeer = elements[3]; - final Class javaClazz; - switch (javaPeer) { - case "void": - javaClazz = void.class; - break; - case "boolean": - javaClazz = boolean.class; - break; - case "byte": - javaClazz = byte.class; - break; - case "short": - javaClazz = short.class; - break; - case "char": - javaClazz = char.class; - break; - case "int": - javaClazz = int.class; - break; - case "long": - javaClazz = long.class; - break; - case "float": - javaClazz = float.class; - break; - case "double": - javaClazz = double.class; - break; - default: - javaClazz = Class.forName(javaPeer); - break; - } - addStruct(className, javaClazz); - } + for (Whitelist.Method whitelistMethod : whitelistStruct.whitelistMethods) { + origin = whitelistMethod.origin; + addMethod(whitelist.javaClassLoader, whitelistStruct.painlessTypeName, whitelistMethod); + } + + for (Whitelist.Field whitelistField : whitelistStruct.whitelistFields) { + origin = whitelistField.origin; + addField(whitelistStruct.painlessTypeName, whitelistField); } } 
- } catch (Exception e) { - throw new RuntimeException("error in " + file + ", line: " + currentLine, e); } + } catch (Exception exception) { + throw new IllegalArgumentException("error loading whitelist(s) " + origin, exception); } - return hierarchy; - } - /** adds class methods/fields/ctors */ - private void addElements() { - for (String file : DEFINITION_FILES) { - int currentLine = -1; - try { - try (InputStream stream = Definition.class.getResourceAsStream(file); - LineNumberReader reader = new LineNumberReader(new InputStreamReader(stream, StandardCharsets.UTF_8))) { - String line = null; - String currentClass = null; - while ((line = reader.readLine()) != null) { - currentLine = reader.getLineNumber(); - line = line.trim(); - if (line.length() == 0 || line.charAt(0) == '#') { - continue; - } else if (line.startsWith("class ")) { - assert currentClass == null; - currentClass = line.split("\u0020")[1]; - } else if (line.equals("}")) { - assert currentClass != null; - currentClass = null; - } else { - assert currentClass != null; - addSignature(currentClass, line); - } + // goes through each Painless struct and determines the inheritance list, + // and then adds all inherited types to the Painless struct's whitelist + for (Struct painlessStruct : structsMap.values()) { + List painlessSuperStructs = new ArrayList<>(); + Class javaSuperClass = painlessStruct.clazz.getSuperclass(); + + Stack> javaInteraceLookups = new Stack<>(); + javaInteraceLookups.push(painlessStruct.clazz); + + // adds super classes to the inheritance list + if (javaSuperClass != null && javaSuperClass.isInterface() == false) { + while (javaSuperClass != null) { + Struct painlessSuperStruct = javaClassesToPainlessStructs.get(javaSuperClass); + + if (painlessSuperStruct != null) { + painlessSuperStructs.add(painlessSuperStruct.name); } + + javaInteraceLookups.push(javaSuperClass); + javaSuperClass = javaSuperClass.getSuperclass(); } - } catch (Exception e) { - throw new 
RuntimeException("syntax error in " + file + ", line: " + currentLine, e); } - } - } - private void addStruct(final String name, final Class clazz) { - if (!name.matches("^[_a-zA-Z][\\.,_a-zA-Z0-9]*$")) { - throw new IllegalArgumentException("Invalid struct name [" + name + "]."); - } + // adds all super interfaces to the inheritance list + while (javaInteraceLookups.isEmpty() == false) { + Class javaInterfaceLookup = javaInteraceLookups.pop(); - if (structsMap.containsKey(name)) { - throw new IllegalArgumentException("Duplicate struct name [" + name + "]."); - } + for (Class javaSuperInterface : javaInterfaceLookup.getInterfaces()) { + Struct painlessInterfaceStruct = javaClassesToPainlessStructs.get(javaSuperInterface); - final Struct struct = new Struct(name, clazz, org.objectweb.asm.Type.getType(clazz)); + if (painlessInterfaceStruct != null) { + String painlessInterfaceStructName = painlessInterfaceStruct.name; - structsMap.put(name, struct); - simpleTypesMap.put(name, getTypeInternal(name)); - } + if (painlessSuperStructs.contains(painlessInterfaceStructName) == false) { + painlessSuperStructs.add(painlessInterfaceStructName); + } - private void addConstructorInternal(final String struct, final String name, final Type[] args) { - final Struct owner = structsMap.get(struct); + for (Class javaPushInterface : javaInterfaceLookup.getInterfaces()) { + javaInteraceLookups.push(javaPushInterface); + } + } + } + } - if (owner == null) { - throw new IllegalArgumentException( - "Owner struct [" + struct + "] not defined for constructor [" + name + "]."); + // copies methods and fields from super structs to the parent struct + copyStruct(painlessStruct.name, painlessSuperStructs); + + // copies methods and fields from Object into interface types + if (painlessStruct.clazz.isInterface() || ("def").equals(painlessStruct.name)) { + Struct painlessObjectStruct = javaClassesToPainlessStructs.get(Object.class); + + if (painlessObjectStruct != null) { + 
copyStruct(painlessStruct.name, Collections.singletonList(painlessObjectStruct.name)); + } + } } - if (!name.matches("")) { - throw new IllegalArgumentException( - "Invalid constructor name [" + name + "] with the struct [" + owner.name + "]."); + // mark functional interfaces (or set null, to mark class is not) + for (Struct clazz : structsMap.values()) { + clazz.functionalMethod.set(computeFunctionalInterfaceMethod(clazz)); } - MethodKey methodKey = new MethodKey(name, args.length); + // precompute runtime classes + for (Struct struct : structsMap.values()) { + addRuntimeClass(struct); + } + // copy all structs to make them unmodifiable for outside users: + for (final Map.Entry entry : structsMap.entrySet()) { + entry.setValue(entry.getValue().freeze()); + } + } - if (owner.constructors.containsKey(methodKey)) { - throw new IllegalArgumentException( - "Duplicate constructor [" + methodKey + "] found within the struct [" + owner.name + "]."); + private void addStruct(ClassLoader whitelistClassLoader, Whitelist.Struct whitelistStruct) { + if (!whitelistStruct.painlessTypeName.matches("^[_a-zA-Z][._a-zA-Z0-9]*")) { + throw new IllegalArgumentException("invalid struct type name [" + whitelistStruct.painlessTypeName + "]"); } - if (owner.staticMethods.containsKey(methodKey)) { - throw new IllegalArgumentException("Constructors and static methods may not have the same signature" + - " [" + methodKey + "] within the same struct [" + owner.name + "]."); + Class javaClass; + + if ("void".equals(whitelistStruct.javaClassName)) javaClass = void.class; + else if ("boolean".equals(whitelistStruct.javaClassName)) javaClass = boolean.class; + else if ("byte".equals(whitelistStruct.javaClassName)) javaClass = byte.class; + else if ("short".equals(whitelistStruct.javaClassName)) javaClass = short.class; + else if ("char".equals(whitelistStruct.javaClassName)) javaClass = char.class; + else if ("int".equals(whitelistStruct.javaClassName)) javaClass = int.class; + else if 
("long".equals(whitelistStruct.javaClassName)) javaClass = long.class; + else if ("float".equals(whitelistStruct.javaClassName)) javaClass = float.class; + else if ("double".equals(whitelistStruct.javaClassName)) javaClass = double.class; + else { + try { + javaClass = Class.forName(whitelistStruct.javaClassName, true, whitelistClassLoader); + } catch (ClassNotFoundException cnfe) { + throw new IllegalArgumentException("invalid java class name [" + whitelistStruct.javaClassName + "]" + + " for struct [" + whitelistStruct.painlessTypeName + "]"); + } } - if (owner.methods.containsKey(methodKey)) { - throw new IllegalArgumentException("Constructors and methods may not have the same signature" + - " [" + methodKey + "] within the same struct [" + owner.name + "]."); + Struct existingStruct = structsMap.get(whitelistStruct.painlessTypeName); + + if (existingStruct == null) { + Struct struct = new Struct(whitelistStruct.painlessTypeName, javaClass, org.objectweb.asm.Type.getType(javaClass)); + + structsMap.put(whitelistStruct.painlessTypeName, struct); + simpleTypesMap.put(whitelistStruct.painlessTypeName, getTypeInternal(whitelistStruct.painlessTypeName)); + } else if (existingStruct.clazz.equals(javaClass) == false) { + throw new IllegalArgumentException("struct [" + whitelistStruct.painlessTypeName + "] is used to " + + "illegally represent multiple java classes [" + whitelistStruct.javaClassName + "] and " + + "[" + existingStruct.clazz.getName() + "]"); } + } - final Class[] classes = new Class[args.length]; + private void addConstructor(String ownerStructName, Whitelist.Constructor whitelistConstructor) { + Struct ownerStruct = structsMap.get(ownerStructName); - for (int count = 0; count < classes.length; ++count) { - classes[count] = args[count].clazz; + if (ownerStruct == null) { + throw new IllegalArgumentException("owner struct [" + ownerStructName + "] not defined for constructor with " + + "parameters " + whitelistConstructor.painlessParameterTypeNames); } - 
final java.lang.reflect.Constructor reflect; + List painlessParametersTypes = new ArrayList<>(whitelistConstructor.painlessParameterTypeNames.size()); + Class[] javaClassParameters = new Class[whitelistConstructor.painlessParameterTypeNames.size()]; - try { - reflect = owner.clazz.getConstructor(classes); - } catch (final NoSuchMethodException exception) { - throw new IllegalArgumentException("Constructor [" + name + "] not found for class" + - " [" + owner.clazz.getName() + "] with arguments " + Arrays.toString(classes) + "."); + for (int parameterCount = 0; parameterCount < whitelistConstructor.painlessParameterTypeNames.size(); ++parameterCount) { + String painlessParameterTypeName = whitelistConstructor.painlessParameterTypeNames.get(parameterCount); + + try { + Type painlessParameterType = getTypeInternal(painlessParameterTypeName); + + painlessParametersTypes.add(painlessParameterType); + javaClassParameters[parameterCount] = painlessParameterType.clazz; + } catch (IllegalArgumentException iae) { + throw new IllegalArgumentException("struct not defined for constructor parameter [" + painlessParameterTypeName + "] " + + "with owner struct [" + ownerStructName + "] and constructor parameters " + + whitelistConstructor.painlessParameterTypeNames, iae); + } } - final org.objectweb.asm.commons.Method asm = org.objectweb.asm.commons.Method.getMethod(reflect); - final Type returnType = getTypeInternal("void"); - final MethodHandle handle; + java.lang.reflect.Constructor javaConstructor; try { - handle = MethodHandles.publicLookup().in(owner.clazz).unreflectConstructor(reflect); - } catch (final IllegalAccessException exception) { - throw new IllegalArgumentException("Constructor " + - " not found for class [" + owner.clazz.getName() + "]" + - " with arguments " + Arrays.toString(classes) + "."); + javaConstructor = ownerStruct.clazz.getConstructor(javaClassParameters); + } catch (NoSuchMethodException exception) { + throw new IllegalArgumentException("constructor 
not defined for owner struct [" + ownerStructName + "] " + + " with constructor parameters " + whitelistConstructor.painlessParameterTypeNames, exception); } - final Method constructor = new Method(name, owner, null, returnType, Arrays.asList(args), asm, reflect.getModifiers(), handle); + MethodKey painlessMethodKey = new MethodKey("", whitelistConstructor.painlessParameterTypeNames.size()); + Method painlessConstructor = ownerStruct.constructors.get(painlessMethodKey); - owner.constructors.put(methodKey, constructor); - } + if (painlessConstructor == null) { + org.objectweb.asm.commons.Method asmConstructor = org.objectweb.asm.commons.Method.getMethod(javaConstructor); + MethodHandle javaHandle; - /** - * Adds a new signature to the definition. - *

- * Signatures have the following forms: - *

    - *
  • {@code void method(String,int)} - *
  • {@code boolean field} - *
  • {@code Class (String)} - *
- * no spaces allowed. - */ - private void addSignature(String className, String signature) { - String elements[] = signature.split("\u0020"); - if (elements.length != 2) { - throw new IllegalArgumentException("Malformed signature: " + signature); - } - // method or field type (e.g. return type) - Type rtn = getTypeInternal(elements[0]); - int parenIndex = elements[1].indexOf('('); - if (parenIndex != -1) { - // method or ctor - int parenEnd = elements[1].indexOf(')'); - final Type args[]; - if (parenEnd > parenIndex + 1) { - String arguments[] = elements[1].substring(parenIndex + 1, parenEnd).split(","); - args = new Type[arguments.length]; - for (int i = 0; i < arguments.length; i++) { - args[i] = getTypeInternal(arguments[i]); - } - } else { - args = new Type[0]; + try { + javaHandle = MethodHandles.publicLookup().in(ownerStruct.clazz).unreflectConstructor(javaConstructor); + } catch (IllegalAccessException exception) { + throw new IllegalArgumentException("constructor not defined for owner struct [" + ownerStructName + "] " + + " with constructor parameters " + whitelistConstructor.painlessParameterTypeNames); } - String methodName = elements[1].substring(0, parenIndex); - if (methodName.equals("")) { - if (!elements[0].equals(className)) { - throw new IllegalArgumentException("Constructors must return their own type"); - } - addConstructorInternal(className, "", args); - } else { - int index = methodName.lastIndexOf("."); - if (index >= 0) { - String augmentation = methodName.substring(0, index); - methodName = methodName.substring(index + 1); - addMethodInternal(className, methodName, augmentation, rtn, args); - } else { - addMethodInternal(className, methodName, null, rtn, args); - } - } - } else { - // field - addFieldInternal(className, elements[1], rtn); + painlessConstructor = new Method("", ownerStruct, null, getTypeInternal("void"), painlessParametersTypes, + asmConstructor, javaConstructor.getModifiers(), javaHandle); + 
ownerStruct.constructors.put(painlessMethodKey, painlessConstructor); + } else if (painlessConstructor.equals(painlessParametersTypes) == false){ + throw new IllegalArgumentException( + "illegal duplicate constructors [" + painlessMethodKey + "] found within the struct [" + ownerStruct.name + "] " + + "with parameters " + painlessParametersTypes + " and " + painlessConstructor.arguments); } } - private void addMethodInternal(String struct, String name, String augmentation, Type rtn, Type[] args) { - final Struct owner = structsMap.get(struct); + private void addMethod(ClassLoader whitelistClassLoader, String ownerStructName, Whitelist.Method whitelistMethod) { + Struct ownerStruct = structsMap.get(ownerStructName); - if (owner == null) { - throw new IllegalArgumentException("Owner struct [" + struct + "] not defined" + - " for method [" + name + "]."); + if (ownerStruct == null) { + throw new IllegalArgumentException("owner struct [" + ownerStructName + "] not defined for method with " + + "name [" + whitelistMethod.javaMethodName + "] and parameters " + whitelistMethod.painlessParameterTypeNames); } - if (!name.matches("^[_a-zA-Z][_a-zA-Z0-9]*$")) { - throw new IllegalArgumentException("Invalid method name" + - " [" + name + "] with the struct [" + owner.name + "]."); + if (!whitelistMethod.javaMethodName.matches("^[_a-zA-Z][_a-zA-Z0-9]*$")) { + throw new IllegalArgumentException("invalid method name" + + " [" + whitelistMethod.javaMethodName + "] for owner struct [" + ownerStructName + "]."); } - MethodKey methodKey = new MethodKey(name, args.length); + Class javaAugmentedClass = null; - if (owner.constructors.containsKey(methodKey)) { - throw new IllegalArgumentException("Constructors and methods" + - " may not have the same signature [" + methodKey + "] within the same struct" + - " [" + owner.name + "]."); + if (whitelistMethod.javaAugmentedClassName != null) { + try { + javaAugmentedClass = Class.forName(whitelistMethod.javaAugmentedClassName, true, 
whitelistClassLoader); + } catch (ClassNotFoundException cnfe) { + throw new IllegalArgumentException("augmented class [" + whitelistMethod.javaAugmentedClassName + "] " + + "not found for method with name [" + whitelistMethod.javaMethodName + "] " + + "and parameters " + whitelistMethod.painlessParameterTypeNames, cnfe); + } } - if (owner.staticMethods.containsKey(methodKey) || owner.methods.containsKey(methodKey)) { - throw new IllegalArgumentException( - "Duplicate method signature [" + methodKey + "] found within the struct [" + owner.name + "]."); + int augmentedOffset = javaAugmentedClass == null ? 0 : 1; + + List painlessParametersTypes = new ArrayList<>(whitelistMethod.painlessParameterTypeNames.size()); + Class[] javaClassParameters = new Class[whitelistMethod.painlessParameterTypeNames.size() + augmentedOffset]; + + if (javaAugmentedClass != null) { + javaClassParameters[0] = ownerStruct.clazz; } - final Class implClass; - final Class[] params; + for (int parameterCount = 0; parameterCount < whitelistMethod.painlessParameterTypeNames.size(); ++parameterCount) { + String painlessParameterTypeName = whitelistMethod.painlessParameterTypeNames.get(parameterCount); - if (augmentation == null) { - implClass = owner.clazz; - params = new Class[args.length]; - for (int count = 0; count < args.length; ++count) { - params[count] = args[count].clazz; - } - } else { try { - implClass = Class.forName(augmentation); - } catch (ClassNotFoundException cnfe) { - throw new IllegalArgumentException("Augmentation class [" + augmentation + "]" + - " not found for struct [" + struct + "] using method name [" + name + "].", cnfe); + Type painlessParameterType = getTypeInternal(painlessParameterTypeName); + + painlessParametersTypes.add(painlessParameterType); + javaClassParameters[parameterCount + augmentedOffset] = painlessParameterType.clazz; + } catch (IllegalArgumentException iae) { + throw new IllegalArgumentException("struct not defined for method parameter [" + 
painlessParameterTypeName + "] " + + "with owner struct [" + ownerStructName + "] and method with name [" + whitelistMethod.javaMethodName + "] " + + "and parameters " + whitelistMethod.painlessParameterTypeNames, iae); } + } - params = new Class[args.length + 1]; - params[0] = owner.clazz; - for (int count = 0; count < args.length; ++count) { - params[count+1] = args[count].clazz; - } + Class javaImplClass = javaAugmentedClass == null ? ownerStruct.clazz : javaAugmentedClass; + java.lang.reflect.Method javaMethod; + + try { + javaMethod = javaImplClass.getMethod(whitelistMethod.javaMethodName, javaClassParameters); + } catch (NoSuchMethodException nsme) { + throw new IllegalArgumentException("method with name [" + whitelistMethod.javaMethodName + "] " + + "and parameters " + whitelistMethod.painlessParameterTypeNames + " not found for class [" + + javaImplClass.getName() + "]", nsme); } - final java.lang.reflect.Method reflect; + Type painlessReturnType; try { - reflect = implClass.getMethod(name, params); - } catch (NoSuchMethodException exception) { - throw new IllegalArgumentException("Method [" + name + - "] not found for class [" + implClass.getName() + "]" + - " with arguments " + Arrays.toString(params) + "."); + painlessReturnType = getTypeInternal(whitelistMethod.painlessReturnTypeName); + } catch (IllegalArgumentException iae) { + throw new IllegalArgumentException("struct not defined for return type [" + whitelistMethod.painlessReturnTypeName + "] " + + "with owner struct [" + ownerStructName + "] and method with name [" + whitelistMethod.javaMethodName + "] " + + "and parameters " + whitelistMethod.painlessParameterTypeNames, iae); } - if (!reflect.getReturnType().equals(rtn.clazz)) { - throw new IllegalArgumentException("Specified return type class [" + rtn.clazz + "]" + - " does not match the found return type class [" + reflect.getReturnType() + "] for the" + - " method [" + name + "]" + - " within the struct [" + owner.name + "]."); + if 
(javaMethod.getReturnType().equals(painlessReturnType.clazz) == false) { + throw new IllegalArgumentException("specified return type class [" + painlessReturnType.clazz + "] " + + "does not match the return type class [" + javaMethod.getReturnType() + "] for the " + + "method with name [" + whitelistMethod.javaMethodName + "] " + + "and parameters " + whitelistMethod.painlessParameterTypeNames); } - final org.objectweb.asm.commons.Method asm = org.objectweb.asm.commons.Method.getMethod(reflect); + MethodKey painlessMethodKey = new MethodKey(whitelistMethod.javaMethodName, whitelistMethod.painlessParameterTypeNames.size()); - MethodHandle handle; + if (javaAugmentedClass == null && Modifier.isStatic(javaMethod.getModifiers())) { + Method painlessMethod = ownerStruct.staticMethods.get(painlessMethodKey); - try { - handle = MethodHandles.publicLookup().in(implClass).unreflect(reflect); - } catch (final IllegalAccessException exception) { - throw new IllegalArgumentException("Method [" + name + "]" + - " not found for class [" + implClass.getName() + "]" + - " with arguments " + Arrays.toString(params) + "."); - } + if (painlessMethod == null) { + org.objectweb.asm.commons.Method asmMethod = org.objectweb.asm.commons.Method.getMethod(javaMethod); + MethodHandle javaMethodHandle; - final int modifiers = reflect.getModifiers(); - final Method method = - new Method(name, owner, augmentation == null ? 
null : implClass, rtn, Arrays.asList(args), asm, modifiers, handle); + try { + javaMethodHandle = MethodHandles.publicLookup().in(javaImplClass).unreflect(javaMethod); + } catch (IllegalAccessException exception) { + throw new IllegalArgumentException("method handle not found for method with name " + + "[" + whitelistMethod.javaMethodName + "] and parameters " + whitelistMethod.painlessParameterTypeNames); + } - if (augmentation == null && java.lang.reflect.Modifier.isStatic(modifiers)) { - owner.staticMethods.put(methodKey, method); + painlessMethod = new Method(whitelistMethod.javaMethodName, ownerStruct, null, painlessReturnType, + painlessParametersTypes, asmMethod, javaMethod.getModifiers(), javaMethodHandle); + ownerStruct.staticMethods.put(painlessMethodKey, painlessMethod); + } else if ((painlessMethod.name.equals(whitelistMethod.javaMethodName) && painlessMethod.rtn.equals(painlessReturnType) && + painlessMethod.arguments.equals(painlessParametersTypes)) == false) { + throw new IllegalArgumentException("illegal duplicate static methods [" + painlessMethodKey + "] " + + "found within the struct [" + ownerStruct.name + "] with name [" + whitelistMethod.javaMethodName + "], " + + "return types [" + painlessReturnType + "] and [" + painlessMethod.rtn.name + "], " + + "and parameters " + painlessParametersTypes + " and " + painlessMethod.arguments); + } } else { - owner.methods.put(methodKey, method); + Method painlessMethod = ownerStruct.methods.get(painlessMethodKey); + + if (painlessMethod == null) { + org.objectweb.asm.commons.Method asmMethod = org.objectweb.asm.commons.Method.getMethod(javaMethod); + MethodHandle javaMethodHandle; + + try { + javaMethodHandle = MethodHandles.publicLookup().in(javaImplClass).unreflect(javaMethod); + } catch (IllegalAccessException exception) { + throw new IllegalArgumentException("method handle not found for method with name " + + "[" + whitelistMethod.javaMethodName + "] and parameters " + 
whitelistMethod.painlessParameterTypeNames); + } + + painlessMethod = new Method(whitelistMethod.javaMethodName, ownerStruct, javaAugmentedClass, painlessReturnType, + painlessParametersTypes, asmMethod, javaMethod.getModifiers(), javaMethodHandle); + ownerStruct.methods.put(painlessMethodKey, painlessMethod); + } else if ((painlessMethod.name.equals(whitelistMethod.javaMethodName) && painlessMethod.rtn.equals(painlessReturnType) && + painlessMethod.arguments.equals(painlessParametersTypes)) == false) { + throw new IllegalArgumentException("illegal duplicate member methods [" + painlessMethodKey + "] " + + "found within the struct [" + ownerStruct.name + "] with name [" + whitelistMethod.javaMethodName + "], " + + "return types [" + painlessReturnType + "] and [" + painlessMethod.rtn.name + "], " + + "and parameters " + painlessParametersTypes + " and " + painlessMethod.arguments); + } } } - private void addFieldInternal(String struct, String name, Type type) { - final Struct owner = structsMap.get(struct); + private void addField(String ownerStructName, Whitelist.Field whitelistField) { + Struct ownerStruct = structsMap.get(ownerStructName); - if (owner == null) { - throw new IllegalArgumentException("Owner struct [" + struct + "] not defined for " + - " field [" + name + "]."); + if (ownerStruct == null) { + throw new IllegalArgumentException("owner struct [" + ownerStructName + "] not defined for method with " + + "name [" + whitelistField.javaFieldName + "] and type " + whitelistField.painlessFieldTypeName); } - if (!name.matches("^[_a-zA-Z][_a-zA-Z0-9]*$")) { - throw new IllegalArgumentException("Invalid field " + - " name [" + name + "] with the struct [" + owner.name + "]."); + if (!whitelistField.javaFieldName.matches("^[_a-zA-Z][_a-zA-Z0-9]*$")) { + throw new IllegalArgumentException("invalid field name " + + "[" + whitelistField.painlessFieldTypeName + "] for owner struct [" + ownerStructName + "]."); } - if (owner.staticMembers.containsKey(name) || 
owner.members.containsKey(name)) { - throw new IllegalArgumentException("Duplicate field name [" + name + "]" + - " found within the struct [" + owner.name + "]."); + java.lang.reflect.Field javaField; + + try { + javaField = ownerStruct.clazz.getField(whitelistField.javaFieldName); + } catch (NoSuchFieldException exception) { + throw new IllegalArgumentException("field [" + whitelistField.javaFieldName + "] " + + "not found for class [" + ownerStruct.clazz.getName() + "]."); } - java.lang.reflect.Field reflect; + Type painlessFieldType; try { - reflect = owner.clazz.getField(name); - } catch (final NoSuchFieldException exception) { - throw new IllegalArgumentException("Field [" + name + "]" + - " not found for class [" + owner.clazz.getName() + "]."); + painlessFieldType = getTypeInternal(whitelistField.painlessFieldTypeName); + } catch (IllegalArgumentException iae) { + throw new IllegalArgumentException("struct not defined for return type [" + whitelistField.painlessFieldTypeName + "] " + + "with owner struct [" + ownerStructName + "] and field with name [" + whitelistField.javaFieldName + "]", iae); } - final int modifiers = reflect.getModifiers(); - boolean isStatic = java.lang.reflect.Modifier.isStatic(modifiers); + if (Modifier.isStatic(javaField.getModifiers())) { + if (Modifier.isFinal(javaField.getModifiers()) == false) { + throw new IllegalArgumentException("static [" + whitelistField.javaFieldName + "] " + + "with owner struct [" + ownerStruct.name + "] is not final"); + } - MethodHandle getter = null; - MethodHandle setter = null; + Field painlessField = ownerStruct.staticMembers.get(whitelistField.javaFieldName); - try { - if (!isStatic) { - getter = MethodHandles.publicLookup().unreflectGetter(reflect); - setter = MethodHandles.publicLookup().unreflectSetter(reflect); + if (painlessField == null) { + painlessField = new Field(whitelistField.javaFieldName, javaField.getName(), + ownerStruct, painlessFieldType, javaField.getModifiers(), null, null); + 
ownerStruct.staticMembers.put(whitelistField.javaFieldName, painlessField); + } else if (painlessField.type.equals(painlessFieldType) == false) { + throw new IllegalArgumentException("illegal duplicate static fields [" + whitelistField.javaFieldName + "] " + + "found within the struct [" + ownerStruct.name + "] with type [" + whitelistField.painlessFieldTypeName + "]"); } - } catch (final IllegalAccessException exception) { - throw new IllegalArgumentException("Getter/Setter [" + name + "]" + - " not found for class [" + owner.clazz.getName() + "]."); - } - - final Field field = new Field(name, reflect.getName(), owner, type, modifiers, getter, setter); + } else { + MethodHandle javaMethodHandleGetter = null; + MethodHandle javaMethodHandleSetter = null; - if (isStatic) { - // require that all static fields are static final - if (!java.lang.reflect.Modifier.isFinal(modifiers)) { - throw new IllegalArgumentException("Static [" + name + "]" + - " within the struct [" + owner.name + "] is not final."); + try { + if (Modifier.isStatic(javaField.getModifiers()) == false) { + javaMethodHandleGetter = MethodHandles.publicLookup().unreflectGetter(javaField); + javaMethodHandleSetter = MethodHandles.publicLookup().unreflectSetter(javaField); + } + } catch (IllegalAccessException exception) { + throw new IllegalArgumentException("getter/setter [" + whitelistField.javaFieldName + "]" + + " not found for class [" + ownerStruct.clazz.getName() + "]."); } - owner.staticMembers.put(name, field); - } else { - owner.members.put(name, field); + Field painlessField = ownerStruct.staticMembers.get(whitelistField.javaFieldName); + + if (painlessField == null) { + painlessField = new Field(whitelistField.javaFieldName, javaField.getName(), + ownerStruct, painlessFieldType, javaField.getModifiers(), javaMethodHandleGetter, javaMethodHandleSetter); + ownerStruct.staticMembers.put(whitelistField.javaFieldName, painlessField); + } else if (painlessField.type.equals(painlessFieldType) == 
false) { + throw new IllegalArgumentException("illegal duplicate member fields [" + whitelistField.javaFieldName + "] " + + "found within the struct [" + ownerStruct.name + "] with type [" + whitelistField.painlessFieldTypeName + "]"); + } } } @@ -968,8 +976,12 @@ private void copyStruct(String struct, List children) { MethodKey methodKey = kvPair.getKey(); Method method = kvPair.getValue(); if (owner.methods.get(methodKey) == null) { + // TODO: some of these are no longer valid or outright don't work + // TODO: since classes may not come from the Painless classloader + // TODO: and it was dependent on the order of the extends which + // TODO: which no longer exists since this is generated automatically // sanity check, look for missing covariant/generic override - if (owner.clazz.isInterface() && child.clazz == Object.class) { + /*if (owner.clazz.isInterface() && child.clazz == Object.class) { // ok } else if (child.clazz == Spliterator.OfPrimitive.class || child.clazz == PrimitiveIterator.class) { // ok, we rely on generics erasure for these (its guaranteed in the javadocs though!!!!) 
@@ -1009,7 +1021,7 @@ private void copyStruct(String struct, List children) { } catch (ReflectiveOperationException e) { throw new AssertionError(e); } - } + }*/ owner.methods.put(methodKey, method); } } @@ -1104,7 +1116,7 @@ private Method computeFunctionalInterfaceMethod(Struct clazz) { if (methods.size() != 1) { if (hasAnnotation) { throw new IllegalArgumentException("Class: " + clazz.name + - " is marked with FunctionalInterface but doesn't fit the bill: " + methods); + " is marked with FunctionalInterface but doesn't fit the bill: " + methods); } return null; } @@ -1113,7 +1125,7 @@ private Method computeFunctionalInterfaceMethod(Struct clazz) { Method painless = clazz.methods.get(new Definition.MethodKey(oneMethod.getName(), oneMethod.getParameterCount())); if (painless == null || painless.method.equals(org.objectweb.asm.commons.Method.getMethod(oneMethod)) == false) { throw new IllegalArgumentException("Class: " + clazz.name + " is functional but the functional " + - "method is not whitelisted!"); + "method is not whitelisted!"); } return painless; } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Whitelist.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Whitelist.java new file mode 100644 index 0000000000000..7fd3493d51701 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Whitelist.java @@ -0,0 +1,198 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * Whitelist contains data structures designed to be used to generate a white-list of Java classes, + * constructors, methods, and fields that can be used within a Painless script at both compile-time + * and run-time. + * + * A white-list consists of several pieces with {@link Struct}s as the top level. Each {@link Struct} + * will contain zero-to-many {@link Constructor}s, {@link Method}s, and {@link Field}s which are what + * will be available with a Painless script. See each individual white-list object for more detail. + */ +public final class Whitelist { + + /** + * Struct represents the equivalent of a Java class in Painless complete with super classes, + * constructors, methods, and fields. In Painless a class is known as a struct primarily to avoid + * naming conflicts internally. There must be a one-to-one mapping of struct names to Java classes. + * Though, since multiple white-lists may be combined into a single white-list for a specific + * {@link org.elasticsearch.script.ScriptContext}, as long as multiple structs representing the same + * Java class have the same Painless type name and have legal constructor/method overloading they + * can be merged together. + * + * Structs in Painless allow for arity overloading for constructors and methods. 
Arity overloading + * means that multiple constructors are allowed for a single struct as long as they have a different + * number of parameter types, and multiples methods with the same name are allowed for a single struct + * as long as they have the same return type and a different number of parameter types. + * + * Structs will automatically extend other white-listed structs if the Java class they represent is a + * subclass of other structs including Java interfaces. + */ + public static final class Struct { + + /** Information about where this struct was white-listed from. Can be used for error messages. */ + public final String origin; + + /** The Painless name of this struct which will also be the name of a type in a Painless script. */ + public final String painlessTypeName; + + /** The Java class name this struct represents. */ + public final String javaClassName; + + /** The {@link List} of white-listed ({@link Constructor}s) available to this struct. */ + public final List whitelistConstructors; + + /** The {@link List} of white-listed ({@link Method}s) available to this struct. */ + public final List whitelistMethods; + + /** The {@link List} of white-listed ({@link Field}s) available to this struct. */ + public final List whitelistFields; + + /** Standard constructor. All values must be not {@code null}. 
*/ + public Struct(String origin, String painlessTypeName, String javaClassName, + List whitelistConstructors, List whitelistMethods, List whitelistFields) { + this.origin = Objects.requireNonNull(origin); + this.painlessTypeName = Objects.requireNonNull(painlessTypeName); + this.javaClassName = Objects.requireNonNull(javaClassName); + + this.whitelistConstructors = Collections.unmodifiableList(Objects.requireNonNull(whitelistConstructors)); + this.whitelistMethods = Collections.unmodifiableList(Objects.requireNonNull(whitelistMethods)); + this.whitelistFields = Collections.unmodifiableList(Objects.requireNonNull(whitelistFields)); + } + } + + /** + * Constructor represents the equivalent of a Java constructor available as a white-listed struct + * constructor within Painless. Constructors for Painless structs may be accessed exactly as + * constructors for Java classes are using the 'new' keyword. Painless structs may have multiple + * constructors as long as they comply with arity overloading described for {@link Struct}. + */ + public static final class Constructor { + + /** Information about where this constructor was white-listed from. Can be used for error messages. */ + public final String origin; + + /** + * A {@link List} of {@link String}s that are the Painless type names for the parameters of the + * constructor which can be used to look up the Java constructor through reflection. + */ + public final List painlessParameterTypeNames; + + /** Standard constructor. All values must be not {@code null}. */ + public Constructor(String origin, List painlessParameterTypeNames) { + this.origin = Objects.requireNonNull(origin); + this.painlessParameterTypeNames = Collections.unmodifiableList(Objects.requireNonNull(painlessParameterTypeNames)); + } + } + + /** + * Method represents the equivalent of a Java method available as a white-listed struct method + * within Painless. 
Methods for Painless structs may be accessed exactly as methods for Java classes + * are using the '.' operator on an existing struct variable/field. Painless structs may have multiple + * methods with the same name as long as they comply with arity overloading described for {@link Method}. + * + * Structs may also have additional methods that are not part of the Java class the struct represents - + * these are known as augmented methods. An augmented method can be added to a struct as a part of any + * Java class as long as the method is static and the first parameter of the method is the Java class + * represented by the struct. Note that the augmented method's parent Java class does not need to be + * white-listed. + */ + public static class Method { + + /** Information about where this method was white-listed from. Can be used for error messages. */ + public final String origin; + + /** + * The Java class name for the owner of an augmented method. If the method is not augmented + * this should be {@code null}. + */ + public final String javaAugmentedClassName; + + /** The Java method name used to look up the Java method through reflection. */ + public final String javaMethodName; + + /** + * The Painless type name for the return type of the method which can be used to look up the Java + * method through reflection. + */ + public final String painlessReturnTypeName; + + /** + * A {@link List} of {@link String}s that are the Painless type names for the parameters of the + * method which can be used to look up the Java method through reflection. + */ + public final List painlessParameterTypeNames; + + /** + * Standard constructor. All values must be not {@code null} with the exception of jAugmentedClass; + * jAugmentedClass will be {@code null} unless the method is augmented as described in the class documentation. 
+ */ + public Method(String origin, String javaAugmentedClassName, String javaMethodName, + String painlessReturnTypeName, List painlessParameterTypeNames) { + this.origin = Objects.requireNonNull(origin); + this.javaAugmentedClassName = javaAugmentedClassName; + this.javaMethodName = javaMethodName; + this.painlessReturnTypeName = Objects.requireNonNull(painlessReturnTypeName); + this.painlessParameterTypeNames = Collections.unmodifiableList(Objects.requireNonNull(painlessParameterTypeNames)); + } + } + + /** + * Field represents the equivalent of a Java field available as a white-listed struct field + * within Painless. Fields for Painless structs may be accessed exactly as fields for Java classes + * are using the '.' operator on an existing struct variable/field. + */ + public static class Field { + + /** Information about where this field was white-listed from. Can be used for error messages. */ + public final String origin; + + /** The Java field name used to look up the Java field through reflection. */ + public final String javaFieldName; + + /** The Painless type name for the field which can be used to look up the Java field through reflection. */ + public final String painlessFieldTypeName; + + /** Standard constructor. All values must be not {@code null}. */ + public Field(String origin, String javaFieldName, String painlessFieldTypeName) { + this.origin = Objects.requireNonNull(origin); + this.javaFieldName = Objects.requireNonNull(javaFieldName); + this.painlessFieldTypeName = Objects.requireNonNull(painlessFieldTypeName); + } + } + + /** The {@link ClassLoader} used to look up the white-listed Java classes, constructors, methods, and fields. */ + public final ClassLoader javaClassLoader; + + /** The {@link List} of all the white-listed Painless structs. */ + public final List whitelistStructs; + + /** Standard constructor. All values must be not {@code null}. 
*/ + public Whitelist(ClassLoader javaClassLoader, List whitelistStructs) { + this.javaClassLoader = Objects.requireNonNull(javaClassLoader); + this.whitelistStructs = Collections.unmodifiableList(Objects.requireNonNull(whitelistStructs)); + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WhitelistLoader.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WhitelistLoader.java new file mode 100644 index 0000000000000..ad33d9c7ba5b7 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WhitelistLoader.java @@ -0,0 +1,290 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import java.io.InputStreamReader; +import java.io.LineNumberReader; +import java.lang.reflect.Constructor; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** Loads and creates a {@link Whitelist} from one to many text files. */ +public final class WhitelistLoader { + + /** + * Loads and creates a {@link Whitelist} from one to many text files. 
The file paths are passed in as an array of + * {@link String}s with a single {@link Class} to be used to load the resources where each {@link String} + * is the path of a single text file. The {@link Class}'s {@link ClassLoader} will be used to lookup the Java + * reflection objects for each individual {@link Class}, {@link Constructor}, {@link Method}, and {@link Field} + * specified as part of the white-list in the text file. + * + * A single pass is made through each file to collect all the information about each struct, constructor, method, + * and field. Most validation will be done at a later point after all white-lists have been gathered and their + * merging takes place. + * + * The following can be parsed from each white-list text file: + *
    + *
  • Blank lines will be ignored by the parser.
  • + *
  • Comments may be created starting with a pound '#' symbol and end with a newline. These will + * be ignored by the parser.
  • + *
  • Primitive types may be specified starting with 'class' and followed by the Painless type + * name (often the same as the Java type name), an arrow symbol, the Java type name, + * an opening bracket, a newline, a closing bracket, and a final newline.
  • + *
  • Complex types may be specified starting with 'class' and followed by the Painless type name, + * an arrow symbol, the Java class name, a opening bracket, a newline, constructor/method/field + * specifications, a closing bracket, and a final newline. Within a complex type the following + * may be parsed: + *
      + *
    • A constructor may be specified starting with an opening parenthesis, followed by a + * comma-delimited list of Painless type names corresponding to the type/class names for + * the equivalent Java parameter types (these must be white-listed as well), a closing + * parenthesis, and a newline.
    • + *
    • A method may be specified starting with a Painless type name for the return type, + * followed by the Java name of the method (which will also be the Painless name for the + * method), an opening parenthesis, a comma-delimited list of Painless type names + * corresponding to the type/class names for the equivalent Java parameter types + * (these must be white-listed as well), a closing parenthesis, and a newline.
    • + *
    • An augmented method may be specified starting with a Painless type name for the return + * type, followed by the fully qualified Java name of the class the augmented method is + * part of (this class does not need to be white-listed), the Java name of the method + * (which will also be the Painless name for the method), an opening parenthesis, a + * comma-delimited list of Painless type names corresponding to the type/class names + * for the equivalent Java parameter types (these must be white-listed as well), a closing + * parenthesis, and a newline.
    • + *
    • A field may be specified starting with a Painless type name for the equivalent Java type + * of the field, followed by the Java name of the field (which will also be the Painless name + * for the field), and a newline.
    • + *
    + *
+ * + * Note there must be a one-to-one correspondence of Painless type names to Java type/class names. + * If the same Painless type is defined across multiple files and the Java class is the same, all + * specified constructors, methods, and fields will be merged into a single Painless type. The + * Painless dynamic type, 'def', used as part of constructor, method, and field definitions will + * be appropriately parsed and handled. + * + * The following example is used to create a single white-list text file: + * + * {@code + * # primitive types + * + * class int -> int { + * } + * + * # complex types + * + * class Example -> my.package.Example { + * # constructors + * () + * (int) + * (def, def) + * (Example, def) + * + * # method + * Example add(int, def) + * int add(Example, Example) + * void example() + * + * # augmented + * Example some.other.Class sub(Example, int, def) + * + * # fields + * int value0 + * int value1 + * def value2 + * } + * } + */ + public static Whitelist loadFromResourceFiles(Class resource, String... filepaths) { + List whitelistStructs = new ArrayList<>(); + + // Execute a single pass through the white-list text files. This will gather all the + // constructors, methods, augmented methods, and fields for each white-listed struct. + for (String filepath : filepaths) { + String line; + int number = -1; + + try (LineNumberReader reader = new LineNumberReader( + new InputStreamReader(resource.getResourceAsStream(filepath), StandardCharsets.UTF_8))) { + + String whitelistStructOrigin = null; + String painlessTypeName = null; + String javaClassName = null; + List whitelistConstructors = null; + List whitelistMethods = null; + List whitelistFields = null; + + while ((line = reader.readLine()) != null) { + number = reader.getLineNumber(); + line = line.trim(); + + // Skip any lines that are either blank or comments. 
+ if (line.length() == 0 || line.charAt(0) == '#') { + continue; + } + + // Handle a new struct by resetting all the variables necessary to construct a new Whitelist.Struct for the white-list. + // Expects the following format: 'class' ID -> ID '{' '\n' + if (line.startsWith("class ")) { + // Ensure the final token of the line is '{'. + if (line.endsWith("{") == false) { + throw new IllegalArgumentException( + "invalid struct definition: failed to parse class opening bracket [" + line + "]"); + } + + // Parse the Painless type name and Java class name. + String[] tokens = line.substring(5, line.length() - 1).replaceAll("\\s+", "").split("->"); + + // Ensure the correct number of tokens. + if (tokens.length != 2) { + throw new IllegalArgumentException("invalid struct definition: failed to parse class name [" + line + "]"); + } + + whitelistStructOrigin = "[" + filepath + "]:[" + number + "]"; + painlessTypeName = tokens[0]; + javaClassName = tokens[1]; + + // Reset all the constructors, methods, and fields to support a new struct. + whitelistConstructors = new ArrayList<>(); + whitelistMethods = new ArrayList<>(); + whitelistFields = new ArrayList<>(); + + // Handle the end of a struct, by creating a new Whitelist.Struct with all the previously gathered + // constructors, methods, augmented methods, and fields, and adding it to the list of white-listed structs. + // Expects the following format: '}' '\n' + } else if (line.equals("}")) { + if (painlessTypeName == null) { + throw new IllegalArgumentException("invalid struct definition: extraneous closing bracket"); + } + + whitelistStructs.add(new Whitelist.Struct(whitelistStructOrigin, painlessTypeName, javaClassName, + whitelistConstructors, whitelistMethods, whitelistFields)); + + // Set all the variables to null to ensure a new struct definition is found before other parsable values. 
+ whitelistStructOrigin = null; + painlessTypeName = null; + javaClassName = null; + whitelistConstructors = null; + whitelistMethods = null; + whitelistFields = null; + + // Handle all other valid cases. + } else { + // Mark the origin of this parsable object. + String origin = "[" + filepath + "]:[" + number + "]"; + + // Ensure we have a defined struct before adding any constructors, methods, augmented methods, or fields. + if (painlessTypeName == null) { + throw new IllegalArgumentException("invalid object definition: expected a class name [" + line + "]"); + } + + // Handle the case for a constructor definition. + // Expects the following format: '(' ( ID ( ',' ID )* )? ')' '\n' + if (line.startsWith("(")) { + // Ensure the final token of the line is ')'. + if (line.endsWith(")") == false) { + throw new IllegalArgumentException( + "invalid constructor definition: expected a closing parenthesis [" + line + "]"); + } + + // Parse the constructor parameters. + String[] tokens = line.substring(1, line.length() - 1).replaceAll("\\s+", "").split(","); + + // Handle the case for a constructor with no parameters. + if ("".equals(tokens[0])) { + tokens = new String[0]; + } + + whitelistConstructors.add(new Whitelist.Constructor(origin, Arrays.asList(tokens))); + + // Handle the case for a method or augmented method definition. + // Expects the following format: ID ID? ID '(' ( ID ( ',' ID )* )? ')' '\n' + } else if (line.contains("(")) { + // Ensure the final token of the line is ')'. + if (line.endsWith(")") == false) { + throw new IllegalArgumentException( + "invalid method definition: expected a closing parenthesis [" + line + "]"); + } + + // Parse the tokens prior to the method parameters. + int parameterIndex = line.indexOf('('); + String[] tokens = line.substring(0, parameterIndex).split("\\s+"); + + String javaMethodName; + String javaAugmentedClassName; + + // Based on the number of tokens, look up the Java method name and if provided the Java augmented class. 
+ if (tokens.length == 2) { + javaMethodName = tokens[1]; + javaAugmentedClassName = null; + } else if (tokens.length == 3) { + javaMethodName = tokens[2]; + javaAugmentedClassName = tokens[1]; + } else { + throw new IllegalArgumentException("invalid method definition: unexpected format [" + line + "]"); + } + + String painlessReturnTypeName = tokens[0]; + + // Parse the method parameters. + tokens = line.substring(parameterIndex + 1, line.length() - 1).replaceAll("\\s+", "").split(","); + + // Handle the case for a method with no parameters. + if ("".equals(tokens[0])) { + tokens = new String[0]; + } + + whitelistMethods.add(new Whitelist.Method(origin, javaAugmentedClassName, javaMethodName, + painlessReturnTypeName, Arrays.asList(tokens))); + + // Handle the case for a field definition. + // Expects the following format: ID ID '\n' + } else { + // Parse the field tokens. + String[] tokens = line.split("\\s+"); + + // Ensure the correct number of tokens. + if (tokens.length != 2) { + throw new IllegalArgumentException("invalid field definition: unexpected format [" + line + "]"); + } + + whitelistFields.add(new Whitelist.Field(origin, tokens[1], tokens[0])); + } + } + } + + // Ensure all structs end with a '}' token before the end of the file. 
+ if (painlessTypeName != null) { + throw new IllegalArgumentException("invalid struct definition: expected closing bracket"); + } + } catch (Exception exception) { + throw new RuntimeException("error in [" + filepath + "] at line [" + number + "]", exception); + } + } + + return new Whitelist(resource.getClassLoader(), whitelistStructs); + } + + private WhitelistLoader() {} +} diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt index 0f8667998209c..a5a414008d753 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt @@ -36,8 +36,8 @@ class CharSequence -> java.lang.CharSequence { IntStream chars() IntStream codePoints() int length() - String org.elasticsearch.painless.api.Augmentation.replaceAll(Pattern,Function) - String org.elasticsearch.painless.api.Augmentation.replaceFirst(Pattern,Function) + String org.elasticsearch.painless.api.Augmentation replaceAll(Pattern,Function) + String org.elasticsearch.painless.api.Augmentation replaceFirst(Pattern,Function) CharSequence subSequence(int,int) String toString() } @@ -52,18 +52,18 @@ class Iterable -> java.lang.Iterable { void forEach(Consumer) Iterator iterator() Spliterator spliterator() - # some adaptations of groovy methods - boolean org.elasticsearch.painless.api.Augmentation.any(Predicate) - Collection org.elasticsearch.painless.api.Augmentation.asCollection() - List org.elasticsearch.painless.api.Augmentation.asList() - def org.elasticsearch.painless.api.Augmentation.each(Consumer) - def org.elasticsearch.painless.api.Augmentation.eachWithIndex(ObjIntConsumer) - boolean org.elasticsearch.painless.api.Augmentation.every(Predicate) - List org.elasticsearch.painless.api.Augmentation.findResults(Function) - Map 
org.elasticsearch.painless.api.Augmentation.groupBy(Function) - String org.elasticsearch.painless.api.Augmentation.join(String) - double org.elasticsearch.painless.api.Augmentation.sum() - double org.elasticsearch.painless.api.Augmentation.sum(ToDoubleFunction) + # some adaptations of groovy methods + boolean org.elasticsearch.painless.api.Augmentation any(Predicate) + Collection org.elasticsearch.painless.api.Augmentation asCollection() + List org.elasticsearch.painless.api.Augmentation asList() + def org.elasticsearch.painless.api.Augmentation each(Consumer) + def org.elasticsearch.painless.api.Augmentation eachWithIndex(ObjIntConsumer) + boolean org.elasticsearch.painless.api.Augmentation every(Predicate) + List org.elasticsearch.painless.api.Augmentation findResults(Function) + Map org.elasticsearch.painless.api.Augmentation groupBy(Function) + String org.elasticsearch.painless.api.Augmentation join(String) + double org.elasticsearch.painless.api.Augmentation sum() + double org.elasticsearch.painless.api.Augmentation sum(ToDoubleFunction) } # Readable: i/o @@ -72,7 +72,7 @@ class Iterable -> java.lang.Iterable { #### Classes -class Boolean -> java.lang.Boolean extends Comparable,Object { +class Boolean -> java.lang.Boolean { Boolean TRUE Boolean FALSE boolean booleanValue() @@ -87,7 +87,7 @@ class Boolean -> java.lang.Boolean extends Comparable,Object { Boolean valueOf(boolean) } -class Byte -> java.lang.Byte extends Number,Comparable,Object { +class Byte -> java.lang.Byte { int BYTES byte MAX_VALUE byte MIN_VALUE @@ -105,7 +105,7 @@ class Byte -> java.lang.Byte extends Number,Comparable,Object { Byte valueOf(String,int) } -class Character -> java.lang.Character extends Comparable,Object { +class Character -> java.lang.Character { int BYTES byte COMBINING_SPACING_MARK byte CONNECTOR_PUNCTUATION @@ -226,10 +226,10 @@ class Character -> java.lang.Character extends Comparable,Object { Character valueOf(char) } -class Character.Subset -> java.lang.Character$Subset 
extends Object { +class Character.Subset -> java.lang.Character$Subset { } -class Character.UnicodeBlock -> java.lang.Character$UnicodeBlock extends Character.Subset,Object { +class Character.UnicodeBlock -> java.lang.Character$UnicodeBlock { Character.UnicodeBlock AEGEAN_NUMBERS Character.UnicodeBlock ALCHEMICAL_SYMBOLS Character.UnicodeBlock ALPHABETIC_PRESENTATION_FORMS @@ -459,7 +459,7 @@ class Character.UnicodeBlock -> java.lang.Character$UnicodeBlock extends Charact # ClassValue: ... # Compiler: ... -class Double -> java.lang.Double extends Number,Comparable,Object { +class Double -> java.lang.Double { int BYTES int MAX_EXPONENT double MAX_VALUE @@ -490,13 +490,13 @@ class Double -> java.lang.Double extends Number,Comparable,Object { Double valueOf(double) } -class Enum -> java.lang.Enum extends Comparable,Object { +class Enum -> java.lang.Enum { int compareTo(Enum) - String name(); - int ordinal(); + String name() + int ordinal() } -class Float -> java.lang.Float extends Number,Comparable,Object { +class Float -> java.lang.Float { int BYTES int MAX_EXPONENT float MAX_VALUE @@ -529,7 +529,7 @@ class Float -> java.lang.Float extends Number,Comparable,Object { # InheritableThreadLocal: threads -class Integer -> java.lang.Integer extends Number,Comparable,Object { +class Integer -> java.lang.Integer { int BYTES int MAX_VALUE int MIN_VALUE @@ -569,7 +569,7 @@ class Integer -> java.lang.Integer extends Number,Comparable,Object { Integer valueOf(String,int) } -class Long -> java.lang.Long extends Number,Comparable,Object { +class Long -> java.lang.Long { int BYTES long MAX_VALUE long MIN_VALUE @@ -609,7 +609,7 @@ class Long -> java.lang.Long extends Number,Comparable,Object { Long valueOf(String,int) } -class Math -> java.lang.Math extends Object { +class Math -> java.lang.Math { double E double PI double abs(double) @@ -651,7 +651,7 @@ class Math -> java.lang.Math extends Object { double ulp(double) } -class Number -> java.lang.Number extends Object { +class 
Number -> java.lang.Number { byte byteValue() short shortValue() int intValue() @@ -674,7 +674,7 @@ class Object -> java.lang.Object { # RuntimePermission: skipped # SecurityManger: skipped -class Short -> java.lang.Short extends Number,Comparable,Object { +class Short -> java.lang.Short { int BYTES short MAX_VALUE short MIN_VALUE @@ -693,8 +693,8 @@ class Short -> java.lang.Short extends Number,Comparable,Object { Short valueOf(String,int) } -class StackTraceElement -> java.lang.StackTraceElement extends Object { - StackTraceElement (String,String,String,int) +class StackTraceElement -> java.lang.StackTraceElement { + (String,String,String,int) String getClassName() String getFileName() int getLineNumber() @@ -702,7 +702,7 @@ class StackTraceElement -> java.lang.StackTraceElement extends Object { boolean isNativeMethod() } -class StrictMath -> java.lang.StrictMath extends Object { +class StrictMath -> java.lang.StrictMath { double E double PI double abs(double) @@ -744,8 +744,8 @@ class StrictMath -> java.lang.StrictMath extends Object { double ulp(double) } -class String -> java.lang.String extends CharSequence,Comparable,Object { - String () +class String -> java.lang.String { + () int codePointAt(int) int codePointBefore(int) int codePointCount(int,int) @@ -756,8 +756,8 @@ class String -> java.lang.String extends CharSequence,Comparable,Object { boolean contentEquals(CharSequence) String copyValueOf(char[]) String copyValueOf(char[],int,int) - String org.elasticsearch.painless.api.Augmentation.decodeBase64() - String org.elasticsearch.painless.api.Augmentation.encodeBase64() + String org.elasticsearch.painless.api.Augmentation decodeBase64() + String org.elasticsearch.painless.api.Augmentation encodeBase64() boolean endsWith(String) boolean equalsIgnoreCase(String) String format(Locale,String,def[]) @@ -786,9 +786,9 @@ class String -> java.lang.String extends CharSequence,Comparable,Object { String valueOf(def) } -class StringBuffer -> java.lang.StringBuffer 
extends CharSequence,Appendable,Object { - StringBuffer () - StringBuffer (CharSequence) +class StringBuffer -> java.lang.StringBuffer { + () + (CharSequence) StringBuffer append(def) StringBuffer append(CharSequence,int,int) StringBuffer appendCodePoint(int) @@ -813,9 +813,9 @@ class StringBuffer -> java.lang.StringBuffer extends CharSequence,Appendable,Obj String substring(int,int) } -class StringBuilder -> java.lang.StringBuilder extends CharSequence,Appendable,Object { - StringBuilder () - StringBuilder (CharSequence) +class StringBuilder -> java.lang.StringBuilder { + () + (CharSequence) StringBuilder append(def) StringBuilder append(CharSequence,int,int) StringBuilder appendCodePoint(int) @@ -840,7 +840,7 @@ class StringBuilder -> java.lang.StringBuilder extends CharSequence,Appendable,O String substring(int,int) } -class System -> java.lang.System extends Object { +class System -> java.lang.System { void arraycopy(Object,int,Object,int,int) long currentTimeMillis() long nanoTime() @@ -851,12 +851,12 @@ class System -> java.lang.System extends Object { # ThreadLocal: skipped # Throwable: skipped (reserved for painless, users can only catch Exceptions) -class Void -> java.lang.Void extends Object { +class Void -> java.lang.Void { } #### Enums -class Character.UnicodeScript -> java.lang.Character$UnicodeScript extends Enum,Object { +class Character.UnicodeScript -> java.lang.Character$UnicodeScript { Character.UnicodeScript ARABIC Character.UnicodeScript ARMENIAN Character.UnicodeScript AVESTAN @@ -968,138 +968,138 @@ class Character.UnicodeScript -> java.lang.Character$UnicodeScript extends Enum, #### Exceptions -class ArithmeticException -> java.lang.ArithmeticException extends RuntimeException,Exception,Object { - ArithmeticException () - ArithmeticException (String) +class ArithmeticException -> java.lang.ArithmeticException { + () + (String) } -class ArrayIndexOutOfBoundsException -> java.lang.ArrayIndexOutOfBoundsException extends 
IndexOutOfBoundsException,RuntimeException,Exception,Object { - ArrayIndexOutOfBoundsException () - ArrayIndexOutOfBoundsException (String) +class ArrayIndexOutOfBoundsException -> java.lang.ArrayIndexOutOfBoundsException { + () + (String) } -class ArrayStoreException -> java.lang.ArrayStoreException extends RuntimeException,Exception,Object { - ArrayStoreException () - ArrayStoreException (String) +class ArrayStoreException -> java.lang.ArrayStoreException { + () + (String) } -class ClassCastException -> java.lang.ClassCastException extends RuntimeException,Exception,Object { - ClassCastException () - ClassCastException (String) +class ClassCastException -> java.lang.ClassCastException { + () + (String) } -class ClassNotFoundException -> java.lang.ClassNotFoundException extends ReflectiveOperationException,Exception,Object { - ClassNotFoundException () - ClassNotFoundException (String) +class ClassNotFoundException -> java.lang.ClassNotFoundException { + () + (String) } -class CloneNotSupportedException -> java.lang.CloneNotSupportedException extends Exception,Object { - CloneNotSupportedException () - CloneNotSupportedException (String) +class CloneNotSupportedException -> java.lang.CloneNotSupportedException { + () + (String) } -class EnumConstantNotPresentException -> java.lang.EnumConstantNotPresentException extends RuntimeException,Exception,Object { +class EnumConstantNotPresentException -> java.lang.EnumConstantNotPresentException { String constantName() } -class Exception -> java.lang.Exception extends Object { - Exception () - Exception (String) +class Exception -> java.lang.Exception { + () + (String) String getLocalizedMessage() String getMessage() StackTraceElement[] getStackTrace() } -class IllegalAccessException -> java.lang.IllegalAccessException extends ReflectiveOperationException,Exception,Object { - IllegalAccessException () - IllegalAccessException (String) +class IllegalAccessException -> java.lang.IllegalAccessException { + () + (String) } 
-class IllegalArgumentException -> java.lang.IllegalArgumentException extends RuntimeException,Exception,Object { - IllegalArgumentException () - IllegalArgumentException (String) +class IllegalArgumentException -> java.lang.IllegalArgumentException { + () + (String) } -class IllegalMonitorStateException -> java.lang.IllegalMonitorStateException extends RuntimeException,Exception,Object { - IllegalMonitorStateException () - IllegalMonitorStateException (String) +class IllegalMonitorStateException -> java.lang.IllegalMonitorStateException { + () + (String) } -class IllegalStateException -> java.lang.IllegalStateException extends RuntimeException,Exception,Object { - IllegalStateException () - IllegalStateException (String) +class IllegalStateException -> java.lang.IllegalStateException { + () + (String) } -class IllegalThreadStateException -> java.lang.IllegalThreadStateException extends IllegalArgumentException,RuntimeException,Exception,Object { - IllegalThreadStateException () - IllegalThreadStateException (String) +class IllegalThreadStateException -> java.lang.IllegalThreadStateException { + () + (String) } -class IndexOutOfBoundsException -> java.lang.IndexOutOfBoundsException extends RuntimeException,Exception,Object { - IndexOutOfBoundsException () - IndexOutOfBoundsException (String) +class IndexOutOfBoundsException -> java.lang.IndexOutOfBoundsException { + () + (String) } -class InstantiationException -> java.lang.InstantiationException extends ReflectiveOperationException,Exception,Object { - InstantiationException () - InstantiationException (String) +class InstantiationException -> java.lang.InstantiationException { + () + (String) } -class InterruptedException -> java.lang.InterruptedException extends Exception,Object { - InterruptedException () - InterruptedException (String) +class InterruptedException -> java.lang.InterruptedException { + () + (String) } -class NegativeArraySizeException -> java.lang.NegativeArraySizeException extends 
RuntimeException,Exception,Object { - NegativeArraySizeException () - NegativeArraySizeException (String) +class NegativeArraySizeException -> java.lang.NegativeArraySizeException { + () + (String) } -class NoSuchFieldException -> java.lang.NoSuchFieldException extends ReflectiveOperationException,Exception,Object { - NoSuchFieldException () - NoSuchFieldException (String) +class NoSuchFieldException -> java.lang.NoSuchFieldException { + () + (String) } -class NoSuchMethodException -> java.lang.NoSuchMethodException extends ReflectiveOperationException,Exception,Object { - NoSuchMethodException () - NoSuchMethodException (String) +class NoSuchMethodException -> java.lang.NoSuchMethodException { + () + (String) } -class NullPointerException -> java.lang.NullPointerException extends RuntimeException,Exception,Object { - NullPointerException () - NullPointerException (String) +class NullPointerException -> java.lang.NullPointerException { + () + (String) } -class NumberFormatException -> java.lang.NumberFormatException extends RuntimeException,Exception,Object { - NumberFormatException () - NumberFormatException (String) +class NumberFormatException -> java.lang.NumberFormatException { + () + (String) } -class ReflectiveOperationException -> java.lang.ReflectiveOperationException extends Exception,Object { - ReflectiveOperationException () - ReflectiveOperationException (String) +class ReflectiveOperationException -> java.lang.ReflectiveOperationException { + () + (String) } -class RuntimeException -> java.lang.RuntimeException extends Exception,Object { - RuntimeException () - RuntimeException (String) +class RuntimeException -> java.lang.RuntimeException { + () + (String) } -class SecurityException -> java.lang.SecurityException extends RuntimeException,Exception,Object { - SecurityException () - SecurityException (String) +class SecurityException -> java.lang.SecurityException { + () + (String) } -class StringIndexOutOfBoundsException -> 
java.lang.StringIndexOutOfBoundsException extends IndexOutOfBoundsException,RuntimeException,Exception,Object { - StringIndexOutOfBoundsException () - StringIndexOutOfBoundsException (String) +class StringIndexOutOfBoundsException -> java.lang.StringIndexOutOfBoundsException { + () + (String) } -class TypeNotPresentException -> java.lang.TypeNotPresentException extends RuntimeException,Exception,Object { +class TypeNotPresentException -> java.lang.TypeNotPresentException { String typeName() } -class UnsupportedOperationException -> java.lang.UnsupportedOperationException extends RuntimeException,Exception,Object { - UnsupportedOperationException () - UnsupportedOperationException (String) +class UnsupportedOperationException -> java.lang.UnsupportedOperationException { + () + (String) } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.math.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.math.txt index e3d13a0959c60..e7457628203a2 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.math.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.math.txt @@ -24,12 +24,12 @@ #### Classes -class BigDecimal -> java.math.BigDecimal extends Number,Comparable,Object { +class BigDecimal -> java.math.BigDecimal { BigDecimal ONE BigDecimal TEN BigDecimal ZERO - BigDecimal (String) - BigDecimal (String,MathContext) + (String) + (String,MathContext) BigDecimal abs() BigDecimal abs(MathContext) BigDecimal add(BigDecimal) @@ -77,12 +77,12 @@ class BigDecimal -> java.math.BigDecimal extends Number,Comparable,Object { BigDecimal valueOf(double) } -class BigInteger -> java.math.BigInteger extends Number,Comparable,Object { +class BigInteger -> java.math.BigInteger { BigInteger ONE BigInteger TEN BigInteger ZERO - BigInteger (String) - BigInteger (String,int) + (String) + (String,int) BigInteger abs() BigInteger add(BigInteger) BigInteger and(BigInteger) @@ 
-123,20 +123,20 @@ class BigInteger -> java.math.BigInteger extends Number,Comparable,Object { BigInteger xor(BigInteger) } -class MathContext -> java.math.MathContext extends Object { +class MathContext -> java.math.MathContext { MathContext DECIMAL128 MathContext DECIMAL32 MathContext DECIMAL64 MathContext UNLIMITED - MathContext (int) - MathContext (int,RoundingMode) + (int) + (int,RoundingMode) int getPrecision() RoundingMode getRoundingMode() } #### Enums -class RoundingMode -> java.math.RoundingMode extends Enum,Object { +class RoundingMode -> java.math.RoundingMode { RoundingMode CEILING RoundingMode DOWN RoundingMode FLOOR diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.text.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.text.txt index 2e93b14ab3dc6..fa9170cb5d254 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.text.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.text.txt @@ -24,7 +24,7 @@ #### Interfaces -class AttributedCharacterIterator -> java.text.AttributedCharacterIterator extends CharacterIterator { +class AttributedCharacterIterator -> java.text.AttributedCharacterIterator { Set getAllAttributeKeys() def getAttribute(AttributedCharacterIterator.Attribute) Map getAttributes() @@ -50,20 +50,20 @@ class CharacterIterator -> java.text.CharacterIterator { #### Classes -class Annotation -> java.text.Annotation extends Object { - Annotation (Object) +class Annotation -> java.text.Annotation { + (Object) def getValue() } -class AttributedCharacterIterator.Attribute -> java.text.AttributedCharacterIterator$Attribute extends Object { +class AttributedCharacterIterator.Attribute -> java.text.AttributedCharacterIterator$Attribute { AttributedCharacterIterator.Attribute INPUT_METHOD_SEGMENT AttributedCharacterIterator.Attribute LANGUAGE AttributedCharacterIterator.Attribute READING } -class AttributedString -> 
java.text.AttributedString extends Object { - AttributedString (String) - AttributedString (String,Map) +class AttributedString -> java.text.AttributedString { + (String) + (String,Map) void addAttribute(AttributedCharacterIterator.Attribute,Object) void addAttribute(AttributedCharacterIterator.Attribute,Object,int,int) void addAttributes(Map,int,int) @@ -72,14 +72,14 @@ class AttributedString -> java.text.AttributedString extends Object { AttributedCharacterIterator getIterator(AttributedCharacterIterator.Attribute[],int,int) } -class Bidi -> java.text.Bidi extends Object { +class Bidi -> java.text.Bidi { int DIRECTION_DEFAULT_LEFT_TO_RIGHT int DIRECTION_DEFAULT_RIGHT_TO_LEFT int DIRECTION_LEFT_TO_RIGHT int DIRECTION_RIGHT_TO_LEFT - Bidi (AttributedCharacterIterator) - Bidi (char[],int,byte[],int,int,int) - Bidi (String,int) + (AttributedCharacterIterator) + (char[],int,byte[],int,int,int) + (String,int) boolean baseIsLeftToRight() Bidi createLineBidi(int,int) int getBaseLevel() @@ -96,7 +96,7 @@ class Bidi -> java.text.Bidi extends Object { boolean requiresBidi(char[],int,int) } -class BreakIterator -> java.text.BreakIterator extend Object { +class BreakIterator -> java.text.BreakIterator { int DONE def clone() int current() @@ -121,9 +121,9 @@ class BreakIterator -> java.text.BreakIterator extend Object { void setText(String) } -class ChoiceFormat -> java.text.ChoiceFormat extends NumberFormat,Format,Object { - ChoiceFormat (double[],String[]) - ChoiceFormat (String) +class ChoiceFormat -> java.text.ChoiceFormat { + (double[],String[]) + (String) void applyPattern(String) def[] getFormats() double[] getLimits() @@ -134,7 +134,7 @@ class ChoiceFormat -> java.text.ChoiceFormat extends NumberFormat,Format,Object String toPattern() } -class CollationElementIterator -> java.text.CollationElementIterator extends Object { +class CollationElementIterator -> java.text.CollationElementIterator { int NULLORDER int getMaxExpansion(int) int getOffset() @@ -148,13 +148,13 @@ 
class CollationElementIterator -> java.text.CollationElementIterator extends Obj short tertiaryOrder(int) } -class CollationKey -> java.text.CollationKey extends Comparable,Object { +class CollationKey -> java.text.CollationKey { int compareTo(CollationKey) String getSourceString() byte[] toByteArray() } -class Collator -> java.text.Collator extends Comparator,Object { +class Collator -> java.text.Collator { int CANONICAL_DECOMPOSITION int FULL_DECOMPOSITION int IDENTICAL @@ -174,7 +174,7 @@ class Collator -> java.text.Collator extends Comparator,Object { void setStrength(int) } -class DateFormat -> java.text.DateFormat extends Format,Object { +class DateFormat -> java.text.DateFormat { int AM_PM_FIELD int DATE_FIELD int DAY_OF_WEEK_FIELD @@ -221,7 +221,7 @@ class DateFormat -> java.text.DateFormat extends Format,Object { void setTimeZone(TimeZone) } -class DateFormat.Field -> java.text.DateFormat$Field extends Format.Field,AttributedCharacterIterator.Attribute,Object { +class DateFormat.Field -> java.text.DateFormat$Field { DateFormat.Field AM_PM DateFormat.Field DAY_OF_MONTH DateFormat.Field DAY_OF_WEEK @@ -244,9 +244,9 @@ class DateFormat.Field -> java.text.DateFormat$Field extends Format.Field,Attrib DateFormat.Field ofCalendarField(int) } -class DateFormatSymbols -> java.text.DateFormatSymbols extends Object { - DateFormatSymbols () - DateFormatSymbols (Locale) +class DateFormatSymbols -> java.text.DateFormatSymbols { + () + (Locale) def clone() String[] getAmPmStrings() Locale[] getAvailableLocales() @@ -270,10 +270,10 @@ class DateFormatSymbols -> java.text.DateFormatSymbols extends Object { void setZoneStrings(String[][]) } -class DecimalFormat -> java.text.DecimalFormat extends NumberFormat,Format,Object { - DecimalFormat () - DecimalFormat (String) - DecimalFormat (String,DecimalFormatSymbols) +class DecimalFormat -> java.text.DecimalFormat { + () + (String) + (String,DecimalFormatSymbols) void applyLocalizedPattern(String) void applyPattern(String) 
DecimalFormatSymbols getDecimalFormatSymbols() @@ -298,9 +298,9 @@ class DecimalFormat -> java.text.DecimalFormat extends NumberFormat,Format,Objec String toPattern() } -class DecimalFormatSymbols -> java.text.DecimalFormatSymbols extends Object { - DecimalFormatSymbols () - DecimalFormatSymbols (Locale) +class DecimalFormatSymbols -> java.text.DecimalFormatSymbols { + () + (Locale) def clone() Locale[] getAvailableLocales() Currency getCurrency() @@ -337,9 +337,9 @@ class DecimalFormatSymbols -> java.text.DecimalFormatSymbols extends Object { void setZeroDigit(char) } -class FieldPosition -> java.text.FieldPosition extends Object { - FieldPosition (int) - FieldPosition (Format.Field,int) +class FieldPosition -> java.text.FieldPosition { + (int) + (Format.Field,int) int getBeginIndex() int getEndIndex() int getField() @@ -348,7 +348,7 @@ class FieldPosition -> java.text.FieldPosition extends Object { void setEndIndex(int) } -class Format -> java.text.Format extends Object { +class Format -> java.text.Format { def clone() String format(Object) StringBuffer format(Object,StringBuffer,FieldPosition) @@ -357,10 +357,10 @@ class Format -> java.text.Format extends Object { Object parseObject(String,ParsePosition) } -class Format.Field -> java.text.Format$Field extends AttributedCharacterIterator.Attribute,Object { +class Format.Field -> java.text.Format$Field { } -class MessageFormat -> java.text.MessageFormat extends Format,Object { +class MessageFormat -> java.text.MessageFormat { void applyPattern(String) String format(String,Object[]) Format[] getFormats() @@ -376,16 +376,16 @@ class MessageFormat -> java.text.MessageFormat extends Format,Object { String toPattern() } -class MessageFormat.Field -> java.text.MessageFormat$Field extends Format.Field,AttributedCharacterIterator.Attribute,Object { +class MessageFormat.Field -> java.text.MessageFormat$Field { MessageFormat.Field ARGUMENT } -class Normalizer -> java.text.Normalizer extends Object { +class Normalizer -> 
java.text.Normalizer { boolean isNormalized(CharSequence,Normalizer.Form) String normalize(CharSequence,Normalizer.Form) } -class NumberFormat -> java.text.NumberFormat extends Format,Object { +class NumberFormat -> java.text.NumberFormat { int FRACTION_FIELD int INTEGER_FIELD Locale[] getAvailableLocales() @@ -419,7 +419,7 @@ class NumberFormat -> java.text.NumberFormat extends Format,Object { void setRoundingMode(RoundingMode) } -class NumberFormat.Field -> java.text.NumberFormat$Field extends Format.Field,AttributedCharacterIterator.Attribute,Object { +class NumberFormat.Field -> java.text.NumberFormat$Field { NumberFormat.Field CURRENCY NumberFormat.Field DECIMAL_SEPARATOR NumberFormat.Field EXPONENT @@ -433,24 +433,24 @@ class NumberFormat.Field -> java.text.NumberFormat$Field extends Format.Field,At NumberFormat.Field SIGN } -class ParsePosition -> java.text.ParsePosition extends Object { - ParsePosition (int) +class ParsePosition -> java.text.ParsePosition { + (int) int getErrorIndex() int getIndex() void setErrorIndex(int) void setIndex(int) } -class RuleBasedCollator -> java.text.RuleBasedCollator extends Collator,Comparator,Object { - RuleBasedCollator (String) +class RuleBasedCollator -> java.text.RuleBasedCollator { + (String) CollationElementIterator getCollationElementIterator(String) String getRules() } -class SimpleDateFormat -> java.text.SimpleDateFormat extends DateFormat,Format,Object { - SimpleDateFormat () - SimpleDateFormat (String) - SimpleDateFormat (String,Locale) +class SimpleDateFormat -> java.text.SimpleDateFormat { + () + (String) + (String,Locale) void applyLocalizedPattern(String) void applyPattern(String) Date get2DigitYearStart() @@ -461,16 +461,16 @@ class SimpleDateFormat -> java.text.SimpleDateFormat extends DateFormat,Format,O String toPattern() } -class StringCharacterIterator -> java.text.StringCharacterIterator extends CharacterIterator,Object { - StringCharacterIterator (String) - StringCharacterIterator (String,int) - 
StringCharacterIterator (String,int,int,int) +class StringCharacterIterator -> java.text.StringCharacterIterator { + (String) + (String,int) + (String,int,int,int) void setText(String) } #### Enums -class Normalizer.Form -> java.text.Normalizer$Form extends Enum,Object { +class Normalizer.Form -> java.text.Normalizer$Form { Normalizer.Form NFC Normalizer.Form NFD Normalizer.Form NFKC @@ -481,7 +481,7 @@ class Normalizer.Form -> java.text.Normalizer$Form extends Enum,Object { #### Exceptions -class ParseException -> java.text.ParseException extends Exception,Object { - ParseException (String,int) +class ParseException -> java.text.ParseException { + (String,int) int getErrorOffset() } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt index 286141e29215b..2d932a3ed1a57 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt @@ -24,7 +24,7 @@ #### Interfaces -class ChronoLocalDate -> java.time.chrono.ChronoLocalDate extends Temporal,TemporalAccessor,TemporalAdjuster,Comparable { +class ChronoLocalDate -> java.time.chrono.ChronoLocalDate { ChronoLocalDateTime atTime(LocalTime) int compareTo(ChronoLocalDate) boolean equals(Object) @@ -51,7 +51,7 @@ class ChronoLocalDate -> java.time.chrono.ChronoLocalDate extends Temporal,Tempo ChronoLocalDate with(TemporalField,long) } -class ChronoLocalDateTime -> java.time.chrono.ChronoLocalDateTime extends Temporal,TemporalAccessor,TemporalAdjuster,Comparable { +class ChronoLocalDateTime -> java.time.chrono.ChronoLocalDateTime { ChronoZonedDateTime atZone(ZoneId) int compareTo(ChronoLocalDateTime) boolean equals(Object) @@ -76,7 +76,7 @@ class ChronoLocalDateTime -> java.time.chrono.ChronoLocalDateTime extends Tempor ChronoLocalDateTime 
with(TemporalField,long) } -class Chronology -> java.time.chrono.Chronology extends Comparable { +class Chronology -> java.time.chrono.Chronology { int compareTo(Chronology) ChronoLocalDate date(TemporalAccessor) ChronoLocalDate date(Era,int,int,int) @@ -106,7 +106,7 @@ class Chronology -> java.time.chrono.Chronology extends Comparable { ChronoZonedDateTime zonedDateTime(Instant,ZoneId) } -class ChronoPeriod -> java.time.chrono.ChronoPeriod extends TemporalAmount { +class ChronoPeriod -> java.time.chrono.ChronoPeriod { ChronoPeriod between(ChronoLocalDate,ChronoLocalDate) boolean equals(Object) Chronology getChronology() @@ -122,7 +122,7 @@ class ChronoPeriod -> java.time.chrono.ChronoPeriod extends TemporalAmount { String toString() } -class ChronoZonedDateTime -> java.time.chrono.ChronoZonedDateTime extends Temporal,TemporalAccessor,Comparable { +class ChronoZonedDateTime -> java.time.chrono.ChronoZonedDateTime { int compareTo(ChronoZonedDateTime) boolean equals(Object) String format(DateTimeFormatter) @@ -153,17 +153,17 @@ class ChronoZonedDateTime -> java.time.chrono.ChronoZonedDateTime extends Tempor ChronoZonedDateTime withZoneSameInstant(ZoneId) } -class Era -> java.time.chrono.Era extends TemporalAccessor,TemporalAdjuster { +class Era -> java.time.chrono.Era { String getDisplayName(TextStyle,Locale) int getValue() } #### Classes -class AbstractChronology -> java.time.chrono.Chronology extends Chronology,Comparable,Object { +class AbstractChronology -> java.time.chrono.AbstractChronology { } -class HijrahChronology -> java.time.chrono.HijrahChronology extends AbstractChronology,Chronology,Comparable,Object { +class HijrahChronology -> java.time.chrono.HijrahChronology { HijrahChronology INSTANCE HijrahDate date(TemporalAccessor) HijrahDate date(int,int,int) @@ -175,7 +175,7 @@ class HijrahChronology -> java.time.chrono.HijrahChronology extends AbstractChro HijrahDate resolveDate(Map,ResolverStyle) } -class HijrahDate -> java.time.chrono.HijrahDate extends 
ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { +class HijrahDate -> java.time.chrono.HijrahDate { HijrahDate from(TemporalAccessor) HijrahChronology getChronology() HijrahEra getEra() @@ -189,7 +189,7 @@ class HijrahDate -> java.time.chrono.HijrahDate extends ChronoLocalDate,Temporal HijrahDate withVariant(HijrahChronology) } -class IsoChronology -> java.time.chrono.IsoChronology extends AbstractChronology,Chronology,Comparable,Object { +class IsoChronology -> java.time.chrono.IsoChronology { IsoChronology INSTANCE LocalDate date(TemporalAccessor) LocalDate date(int,int,int) @@ -205,7 +205,7 @@ class IsoChronology -> java.time.chrono.IsoChronology extends AbstractChronology ZonedDateTime zonedDateTime(Instant,ZoneId) } -class JapaneseChronology -> java.time.chrono.JapaneseChronology extends AbstractChronology,Chronology,Comparable,Object { +class JapaneseChronology -> java.time.chrono.JapaneseChronology { JapaneseChronology INSTANCE JapaneseDate date(TemporalAccessor) JapaneseDate date(int,int,int) @@ -217,7 +217,7 @@ class JapaneseChronology -> java.time.chrono.JapaneseChronology extends Abstract JapaneseDate resolveDate(Map,ResolverStyle) } -class JapaneseDate -> java.time.chrono.JapaneseDate extends ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { +class JapaneseDate -> java.time.chrono.JapaneseDate { JapaneseDate of(int,int,int) JapaneseDate from(TemporalAccessor) JapaneseChronology getChronology() @@ -230,7 +230,7 @@ class JapaneseDate -> java.time.chrono.JapaneseDate extends ChronoLocalDate,Temp JapaneseDate minus(long,TemporalUnit) } -class JapaneseEra -> java.time.chrono.JapaneseEra extends Era,TemporalAccessor,TemporalAdjuster,Object { +class JapaneseEra -> java.time.chrono.JapaneseEra { JapaneseEra HEISEI JapaneseEra MEIJI JapaneseEra SHOWA @@ -241,7 +241,7 @@ class JapaneseEra -> java.time.chrono.JapaneseEra extends Era,TemporalAccessor,T JapaneseEra[] values() } -class MinguoChronology -> 
java.time.chrono.MinguoChronology extends AbstractChronology,Chronology,Comparable,Object { +class MinguoChronology -> java.time.chrono.MinguoChronology { MinguoChronology INSTANCE MinguoDate date(TemporalAccessor) MinguoDate date(int,int,int) @@ -253,7 +253,7 @@ class MinguoChronology -> java.time.chrono.MinguoChronology extends AbstractChro MinguoDate resolveDate(Map,ResolverStyle) } -class MinguoDate -> java.time.chrono.MinguoDate extends ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { +class MinguoDate -> java.time.chrono.MinguoDate { MinguoDate of(int,int,int) MinguoDate from(TemporalAccessor) MinguoChronology getChronology() @@ -266,7 +266,7 @@ class MinguoDate -> java.time.chrono.MinguoDate extends ChronoLocalDate,Temporal MinguoDate minus(long,TemporalUnit) } -class ThaiBuddhistChronology -> java.time.chrono.ThaiBuddhistChronology extends AbstractChronology,Chronology,Comparable,Object { +class ThaiBuddhistChronology -> java.time.chrono.ThaiBuddhistChronology { ThaiBuddhistChronology INSTANCE ThaiBuddhistDate date(TemporalAccessor) ThaiBuddhistDate date(int,int,int) @@ -278,7 +278,7 @@ class ThaiBuddhistChronology -> java.time.chrono.ThaiBuddhistChronology extends ThaiBuddhistDate resolveDate(Map,ResolverStyle) } -class ThaiBuddhistDate -> java.time.chrono.ThaiBuddhistDate extends ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { +class ThaiBuddhistDate -> java.time.chrono.ThaiBuddhistDate { ThaiBuddhistDate of(int,int,int) ThaiBuddhistDate from(TemporalAccessor) ThaiBuddhistChronology getChronology() @@ -293,7 +293,7 @@ class ThaiBuddhistDate -> java.time.chrono.ThaiBuddhistDate extends ChronoLocalD #### Enums -class HijrahEra -> java.time.chrono.HijrahEra extends Enum,Comparable,Era,TemporalAccessor,TemporalAdjuster,Object { +class HijrahEra -> java.time.chrono.HijrahEra { HijrahEra AH int getValue() HijrahEra of(int) @@ -301,7 +301,7 @@ class HijrahEra -> java.time.chrono.HijrahEra extends 
Enum,Comparable,Era,Tempor HijrahEra[] values() } -class IsoEra -> java.time.chrono.IsoEra extends Enum,Comparable,Era,TemporalAccessor,TemporalAdjuster,Object { +class IsoEra -> java.time.chrono.IsoEra { IsoEra BCE IsoEra CE int getValue() @@ -310,7 +310,7 @@ class IsoEra -> java.time.chrono.IsoEra extends Enum,Comparable,Era,TemporalAcce IsoEra[] values() } -class MinguoEra -> java.time.chrono.MinguoEra extends Enum,Comparable,Era,TemporalAccessor,TemporalAdjuster,Object { +class MinguoEra -> java.time.chrono.MinguoEra { MinguoEra BEFORE_ROC MinguoEra ROC int getValue() @@ -319,7 +319,7 @@ class MinguoEra -> java.time.chrono.MinguoEra extends Enum,Comparable,Era,Tempor MinguoEra[] values() } -class ThaiBuddhistEra -> java.time.chrono.ThaiBuddhistEra extends Enum,Comparable,Era,TemporalAccessor,TemporalAdjuster,Object { +class ThaiBuddhistEra -> java.time.chrono.ThaiBuddhistEra { ThaiBuddhistEra BE ThaiBuddhistEra BEFORE_BE int getValue() diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.format.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.format.txt index 20831c4b6b4a7..d5b5c9cc35541 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.format.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.format.txt @@ -24,7 +24,7 @@ #### Classes -class DateTimeFormatter -> java.time.format.DateTimeFormatter extends Object { +class DateTimeFormatter -> java.time.format.DateTimeFormatter { DateTimeFormatter BASIC_ISO_DATE DateTimeFormatter ISO_DATE DateTimeFormatter ISO_DATE_TIME @@ -70,8 +70,8 @@ class DateTimeFormatter -> java.time.format.DateTimeFormatter extends Object { DateTimeFormatter withZone(ZoneId) } -class DateTimeFormatterBuilder -> java.time.format.DateTimeFormatterBuilder extends Object { - DateTimeFormatterBuilder () +class DateTimeFormatterBuilder -> java.time.format.DateTimeFormatterBuilder { + () 
DateTimeFormatterBuilder append(DateTimeFormatter) DateTimeFormatterBuilder appendChronologyId() DateTimeFormatterBuilder appendChronologyText(TextStyle) @@ -110,7 +110,7 @@ class DateTimeFormatterBuilder -> java.time.format.DateTimeFormatterBuilder exte DateTimeFormatter toFormatter(Locale) } -class DecimalStyle -> java.time.format.DecimalStyle extends Object { +class DecimalStyle -> java.time.format.DecimalStyle { DecimalStyle STANDARD Set getAvailableLocales() char getDecimalSeparator() @@ -127,7 +127,7 @@ class DecimalStyle -> java.time.format.DecimalStyle extends Object { #### Enums -class FormatStyle -> java.time.format.FormatStyle extends Enum,Comparable,Object { +class FormatStyle -> java.time.format.FormatStyle { FormatStyle FULL FormatStyle LONG FormatStyle MEDIUM @@ -136,7 +136,7 @@ class FormatStyle -> java.time.format.FormatStyle extends Enum,Comparable,Object FormatStyle[] values() } -class ResolverStyle -> java.time.format.ResolverStyle extends Enum,Comparable,Object { +class ResolverStyle -> java.time.format.ResolverStyle { ResolverStyle LENIENT ResolverStyle SMART ResolverStyle STRICT @@ -144,7 +144,7 @@ class ResolverStyle -> java.time.format.ResolverStyle extends Enum,Comparable,Ob ResolverStyle[] values() } -class SignStyle -> java.time.format.SignStyle extends Enum,Comparable,Object { +class SignStyle -> java.time.format.SignStyle { SignStyle ALWAYS SignStyle EXCEEDS_PAD SignStyle NEVER @@ -154,7 +154,7 @@ class SignStyle -> java.time.format.SignStyle extends Enum,Comparable,Object { SignStyle[] values() } -class TextStyle -> java.time.format.TextStyle extends Enum,Comparable,Object { +class TextStyle -> java.time.format.TextStyle { TextStyle FULL TextStyle FULL_STANDALONE TextStyle NARROW @@ -170,8 +170,8 @@ class TextStyle -> java.time.format.TextStyle extends Enum,Comparable,Object { #### Exceptions -class DateTimeParseException -> java.time.format.DateTimeParseException extends DateTimeException,RuntimeException,Exception,Object { - 
DateTimeParseException (String,CharSequence,int) +class DateTimeParseException -> java.time.format.DateTimeParseException { + (String,CharSequence,int) int getErrorIndex() String getParsedString() } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.temporal.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.temporal.txt index 9094dab6ba18a..e3c09bc625521 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.temporal.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.temporal.txt @@ -24,7 +24,7 @@ #### Interfaces -class Temporal -> java.time.temporal.Temporal extends TemporalAccessor { +class Temporal -> java.time.temporal.Temporal { Temporal minus(long,TemporalUnit) Temporal minus(TemporalAmount) Temporal plus(long,TemporalUnit) @@ -85,7 +85,7 @@ class TemporalUnit -> java.time.temporal.TemporalUnit { #### Classes -class IsoFields -> java.time.temporal.IsoFields extends Object { +class IsoFields -> java.time.temporal.IsoFields { TemporalField DAY_OF_QUARTER TemporalField QUARTER_OF_YEAR TemporalUnit QUARTER_YEARS @@ -94,13 +94,13 @@ class IsoFields -> java.time.temporal.IsoFields extends Object { TemporalField WEEK_OF_WEEK_BASED_YEAR } -class JulianFields -> java.time.temporal.JulianFields extends Object { +class JulianFields -> java.time.temporal.JulianFields { TemporalField JULIAN_DAY TemporalField MODIFIED_JULIAN_DAY TemporalField RATA_DIE } -class TemporalAdjusters -> java.time.temporal.TemporalAdjusters extends Object { +class TemporalAdjusters -> java.time.temporal.TemporalAdjusters { TemporalAdjuster dayOfWeekInMonth(int,DayOfWeek) TemporalAdjuster firstDayOfMonth() TemporalAdjuster firstDayOfNextMonth() @@ -117,7 +117,7 @@ class TemporalAdjusters -> java.time.temporal.TemporalAdjusters extends Object { TemporalAdjuster previousOrSame(DayOfWeek) } -class TemporalQueries -> java.time.temporal.TemporalQueries 
extends Object { +class TemporalQueries -> java.time.temporal.TemporalQueries { TemporalQuery chronology() TemporalQuery localDate() TemporalQuery localTime() @@ -127,7 +127,7 @@ class TemporalQueries -> java.time.temporal.TemporalQueries extends Object { TemporalQuery zoneId() } -class ValueRange -> java.time.temporal.ValueRange extends Object { +class ValueRange -> java.time.temporal.ValueRange { int checkValidIntValue(long,TemporalField) long checkValidValue(long,TemporalField) long getLargestMinimum() @@ -143,7 +143,7 @@ class ValueRange -> java.time.temporal.ValueRange extends Object { ValueRange of(long,long,long,long) } -class WeekFields -> java.time.temporal.WeekFields extends Object { +class WeekFields -> java.time.temporal.WeekFields { WeekFields ISO WeekFields SUNDAY_START TemporalUnit WEEK_BASED_YEARS @@ -160,7 +160,7 @@ class WeekFields -> java.time.temporal.WeekFields extends Object { #### Enums -class ChronoField -> java.time.temporal.ChronoField extends Enum,Comparable,TemporalField,Object { +class ChronoField -> java.time.temporal.ChronoField { ChronoField ALIGNED_DAY_OF_WEEK_IN_MONTH ChronoField ALIGNED_DAY_OF_WEEK_IN_YEAR ChronoField ALIGNED_WEEK_OF_MONTH @@ -197,7 +197,7 @@ class ChronoField -> java.time.temporal.ChronoField extends Enum,Comparable,Temp ChronoField[] values() } -class ChronoUnit -> java.time.temporal.ChronoUnit extends Enum,Comparable,TemporalUnit,Object { +class ChronoUnit -> java.time.temporal.ChronoUnit { ChronoUnit CENTURIES ChronoUnit DAYS ChronoUnit DECADES @@ -220,6 +220,6 @@ class ChronoUnit -> java.time.temporal.ChronoUnit extends Enum,Comparable,Tempor #### Exceptions -class UnsupportedTemporalTypeException -> java.time.temporal.UnsupportedTemporalTypeException extends DateTimeException,RuntimeException,Exception,Object { - UnsupportedTemporalTypeException (String) +class UnsupportedTemporalTypeException -> java.time.temporal.UnsupportedTemporalTypeException { + (String) } diff --git 
a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt index 35f19b0abddea..1c012042b02c7 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt @@ -24,7 +24,7 @@ #### Classes -class Clock -> java.time.Clock extends Object { +class Clock -> java.time.Clock { Clock fixed(Instant,ZoneId) ZoneId getZone() Instant instant() @@ -33,7 +33,7 @@ class Clock -> java.time.Clock extends Object { Clock tick(Clock,Duration) } -class Duration -> java.time.Duration extends Comparable,TemporalAmount,Object { +class Duration -> java.time.Duration { Duration ZERO Duration abs() Duration between(Temporal,Temporal) @@ -57,7 +57,7 @@ class Duration -> java.time.Duration extends Comparable,TemporalAmount,Object { Duration of(long,TemporalUnit) Duration ofDays(long) Duration ofHours(long) - Duration ofMillis(long) + Duration ofMillis(long) Duration ofMinutes(long) Duration ofNanos(long) Duration ofSeconds(long) @@ -80,7 +80,7 @@ class Duration -> java.time.Duration extends Comparable,TemporalAmount,Object { Duration withNanos(int) } -class Instant -> java.time.Instant extends Comparable,Temporal,TemporalAccessor,TemporalAdjuster,Object { +class Instant -> java.time.Instant { Instant EPOCH Instant MAX Instant MIN @@ -112,7 +112,7 @@ class Instant -> java.time.Instant extends Comparable,Temporal,TemporalAccessor, Instant with(TemporalField,long) } -class LocalDate -> java.time.LocalDate extends ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { +class LocalDate -> java.time.LocalDate { LocalDate MAX LocalDate MIN LocalDateTime atStartOfDay() @@ -155,7 +155,7 @@ class LocalDate -> java.time.LocalDate extends ChronoLocalDate,Temporal,Temporal LocalDate withYear(int) } -class LocalDateTime -> java.time.LocalDateTime extends 
ChronoLocalDateTime,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { +class LocalDateTime -> java.time.LocalDateTime { LocalDateTime MIN LocalDateTime MAX OffsetDateTime atOffset(ZoneOffset) @@ -212,7 +212,7 @@ class LocalDateTime -> java.time.LocalDateTime extends ChronoLocalDateTime,Tempo LocalDateTime withYear(int) } -class LocalTime -> java.time.LocalTime extends Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { +class LocalTime -> java.time.LocalTime { LocalTime MAX LocalTime MIDNIGHT LocalTime MIN @@ -258,7 +258,7 @@ class LocalTime -> java.time.LocalTime extends Temporal,TemporalAccessor,Tempora LocalTime withSecond(int) } -class MonthDay -> java.time.MonthDay extends TemporalAccessor,TemporalAdjuster,Comparable,Object { +class MonthDay -> java.time.MonthDay { LocalDate atYear(int) int compareTo(MonthDay) String format(DateTimeFormatter) @@ -270,14 +270,14 @@ class MonthDay -> java.time.MonthDay extends TemporalAccessor,TemporalAdjuster,C boolean isBefore(MonthDay) boolean isValidYear(int) MonthDay of(int,int) - MonthDay parse(CharSequence) + MonthDay parse(CharSequence) MonthDay parse(CharSequence,DateTimeFormatter) MonthDay with(Month) MonthDay withDayOfMonth(int) MonthDay withMonth(int) } -class OffsetDateTime -> java.time.OffsetDateTime extends Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { +class OffsetDateTime -> java.time.OffsetDateTime { OffsetDateTime MAX OffsetDateTime MIN ZonedDateTime atZoneSameInstant(ZoneId) @@ -348,7 +348,7 @@ class OffsetDateTime -> java.time.OffsetDateTime extends Temporal,TemporalAccess OffsetDateTime withOffsetSameInstant(ZoneOffset) } -class OffsetTime -> java.time.OffsetTime extends Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { +class OffsetTime -> java.time.OffsetTime { OffsetTime MAX OffsetTime MIN int compareTo(OffsetTime) @@ -391,7 +391,7 @@ class OffsetTime -> java.time.OffsetTime extends Temporal,TemporalAccessor,Tempo OffsetTime withSecond(int) } -class 
Period -> java.time.Period extends ChronoPeriod,TemporalAmount,Object { +class Period -> java.time.Period { Period ZERO Period between(LocalDate,LocalDate) Period from(TemporalAmount) @@ -422,7 +422,7 @@ class Period -> java.time.Period extends ChronoPeriod,TemporalAmount,Object { Period withYears(int) } -class Year -> java.time.Year extends Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { +class Year -> java.time.Year { int MAX_VALUE int MIN_VALUE LocalDate atDay(int) @@ -450,7 +450,7 @@ class Year -> java.time.Year extends Temporal,TemporalAccessor,TemporalAdjuster, Year with(TemporalField,long) } -class YearMonth -> java.time.YearMonth extends Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { +class YearMonth -> java.time.YearMonth { LocalDate atDay(int) LocalDate atEndOfMonth() int compareTo(YearMonth) @@ -482,7 +482,7 @@ class YearMonth -> java.time.YearMonth extends Temporal,TemporalAccessor,Tempora YearMonth withMonth(int) } -class ZonedDateTime -> java.time.ZonedDateTime extends ChronoZonedDateTime,Temporal,TemporalAccessor,Comparable,Object { +class ZonedDateTime -> java.time.ZonedDateTime { int getDayOfMonth() DayOfWeek getDayOfWeek() int getDayOfYear() @@ -544,7 +544,7 @@ class ZonedDateTime -> java.time.ZonedDateTime extends ChronoZonedDateTime,Tempo ZonedDateTime withZoneSameInstant(ZoneId) } -class ZoneId -> java.time.ZoneId extends Object { +class ZoneId -> java.time.ZoneId { Map SHORT_IDS Set getAvailableZoneIds() ZoneId of(String) @@ -558,7 +558,7 @@ class ZoneId -> java.time.ZoneId extends Object { ZoneRules getRules() } -class ZoneOffset -> java.time.ZoneOffset extends ZoneId,Object { +class ZoneOffset -> java.time.ZoneOffset { ZoneOffset MAX ZoneOffset MIN ZoneOffset UTC @@ -573,7 +573,7 @@ class ZoneOffset -> java.time.ZoneOffset extends ZoneId,Object { #### Enums -class DayOfWeek -> java.time.DayOfWeek extends Enum,TemporalAccessor,TemporalAdjuster,Comparable,Object { +class DayOfWeek -> java.time.DayOfWeek { 
DayOfWeek FRIDAY DayOfWeek MONDAY DayOfWeek SATURDAY @@ -591,7 +591,7 @@ class DayOfWeek -> java.time.DayOfWeek extends Enum,TemporalAccessor,TemporalAdj DayOfWeek[] values() } -class Month -> java.time.Month extends Enum,TemporalAccessor,TemporalAdjuster,Comparable,Object { +class Month -> java.time.Month { Month APRIL Month AUGUST Month DECEMBER @@ -621,7 +621,7 @@ class Month -> java.time.Month extends Enum,TemporalAccessor,TemporalAdjuster,Co #### Exceptions -class DateTimeException -> java.time.DateTimeException extends RuntimeException,Exception,Object { - DateTimeException (String) +class DateTimeException -> java.time.DateTimeException { + (String) } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.zone.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.zone.txt index d9d1cce5c104b..dfb6fc7a8076f 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.zone.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.zone.txt @@ -24,7 +24,7 @@ #### Classes -class ZoneOffsetTransition -> java.time.zone.ZoneOffsetTransition extends Comparable,Object { +class ZoneOffsetTransition -> java.time.zone.ZoneOffsetTransition { int compareTo(ZoneOffsetTransition) LocalDateTime getDateTimeAfter() LocalDateTime getDateTimeBefore() @@ -39,7 +39,7 @@ class ZoneOffsetTransition -> java.time.zone.ZoneOffsetTransition extends Compar long toEpochSecond() } -class ZoneOffsetTransitionRule -> java.time.zone.ZoneOffsetTransitionRule extends Object { +class ZoneOffsetTransitionRule -> java.time.zone.ZoneOffsetTransitionRule { ZoneOffsetTransition createTransition(int) int getDayOfMonthIndicator() DayOfWeek getDayOfWeek() @@ -53,7 +53,7 @@ class ZoneOffsetTransitionRule -> java.time.zone.ZoneOffsetTransitionRule extend ZoneOffsetTransitionRule 
of(Month,int,DayOfWeek,LocalTime,boolean,ZoneOffsetTransitionRule.TimeDefinition,ZoneOffset,ZoneOffset,ZoneOffset) } -class ZoneRules -> java.time.zone.ZoneRules extends Object { +class ZoneRules -> java.time.zone.ZoneRules { Duration getDaylightSavings(Instant) ZoneOffset getOffset(Instant) ZoneOffset getStandardOffset(Instant) @@ -70,7 +70,7 @@ class ZoneRules -> java.time.zone.ZoneRules extends Object { ZoneOffsetTransition previousTransition(Instant) } -class ZoneRulesProvider -> java.time.zone.ZoneRulesProvider extends Object { +class ZoneRulesProvider -> java.time.zone.ZoneRulesProvider { Set getAvailableZoneIds() ZoneRules getRules(String,boolean) NavigableMap getVersions(String) @@ -78,7 +78,7 @@ class ZoneRulesProvider -> java.time.zone.ZoneRulesProvider extends Object { #### Enums -class ZoneOffsetTransitionRule.TimeDefinition -> java.time.zone.ZoneOffsetTransitionRule$TimeDefinition extends Enum,Comparable,Object { +class ZoneOffsetTransitionRule.TimeDefinition -> java.time.zone.ZoneOffsetTransitionRule$TimeDefinition { ZoneOffsetTransitionRule.TimeDefinition STANDARD ZoneOffsetTransitionRule.TimeDefinition UTC ZoneOffsetTransitionRule.TimeDefinition WALL @@ -89,6 +89,6 @@ class ZoneOffsetTransitionRule.TimeDefinition -> java.time.zone.ZoneOffsetTransi #### Exceptions -class ZoneRulesException -> java.time.zone.ZoneRulesException extends DateTimeException,RuntimeException,Exception,Object { - ZoneRulesException (String) +class ZoneRulesException -> java.time.zone.ZoneRulesException { + (String) } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.function.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.function.txt index 969a8d6fb46c4..baab868ec0e8a 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.function.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.function.txt @@ -21,7 +21,6 @@ # Painless 
definition file. This defines the hierarchy of classes, # what methods and fields they have, etc. # - #### Interfaces class BiConsumer -> java.util.function.BiConsumer { @@ -34,7 +33,7 @@ class BiFunction -> java.util.function.BiFunction { def apply(def,def) } -class BinaryOperator -> java.util.function.BinaryOperator extends BiFunction { +class BinaryOperator -> java.util.function.BinaryOperator { BinaryOperator maxBy(Comparator) BinaryOperator minBy(Comparator) } @@ -227,6 +226,6 @@ class ToLongFunction -> java.util.function.ToLongFunction { long applyAsLong(def) } -class UnaryOperator -> java.util.function.UnaryOperator extends Function { +class UnaryOperator -> java.util.function.UnaryOperator { UnaryOperator identity() } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt index 4bf1993528bdd..9ea87dd4197ab 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt @@ -22,7 +22,7 @@ # what methods and fields they have, etc. # -class Pattern -> java.util.regex.Pattern extends Object { +class Pattern -> java.util.regex.Pattern { # Pattern compile(String) Intentionally not included. We don't want dynamic patterns because they allow regexes to be generated per time # the script is run which is super slow. LRegex generates code that calls this method but it skips these checks. 
Predicate asPredicate() @@ -35,14 +35,14 @@ class Pattern -> java.util.regex.Pattern extends Object { Stream splitAsStream(CharSequence) } -class Matcher -> java.util.regex.Matcher extends Object { +class Matcher -> java.util.regex.Matcher { int end() int end(int) boolean find() boolean find(int) String group() String group(int) - String org.elasticsearch.painless.api.Augmentation.namedGroup(String) + String org.elasticsearch.painless.api.Augmentation namedGroup(String) int groupCount() boolean hasAnchoringBounds() boolean hasTransparentBounds() diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.stream.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.stream.txt index d24cf8c04246e..d531fbb558f3c 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.stream.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.stream.txt @@ -43,7 +43,7 @@ class Collector -> java.util.stream.Collector { Supplier supplier() } -class DoubleStream -> java.util.stream.DoubleStream extends BaseStream { +class DoubleStream -> java.util.stream.DoubleStream { boolean allMatch(DoublePredicate) boolean anyMatch(DoublePredicate) OptionalDouble average() @@ -82,12 +82,12 @@ class DoubleStream -> java.util.stream.DoubleStream extends BaseStream { double[] toArray() } -class DoubleStream.Builder -> java.util.stream.DoubleStream$Builder extends DoubleConsumer { +class DoubleStream.Builder -> java.util.stream.DoubleStream$Builder { DoubleStream.Builder add(double) DoubleStream build() } -class IntStream -> java.util.stream.IntStream extends BaseStream { +class IntStream -> java.util.stream.IntStream { boolean allMatch(IntPredicate) boolean anyMatch(IntPredicate) DoubleStream asDoubleStream() @@ -130,12 +130,12 @@ class IntStream -> java.util.stream.IntStream extends BaseStream { int[] toArray() } -class IntStream.Builder -> 
java.util.stream.IntStream$Builder extends IntConsumer { +class IntStream.Builder -> java.util.stream.IntStream$Builder { IntStream.Builder add(int) IntStream build() } -class LongStream -> java.util.stream.LongStream extends BaseStream { +class LongStream -> java.util.stream.LongStream { boolean allMatch(LongPredicate) boolean anyMatch(LongPredicate) DoubleStream asDoubleStream() @@ -177,12 +177,12 @@ class LongStream -> java.util.stream.LongStream extends BaseStream { long[] toArray() } -class LongStream.Builder -> java.util.stream.LongStream$Builder extends LongConsumer { +class LongStream.Builder -> java.util.stream.LongStream$Builder { LongStream.Builder add(long) LongStream build() } -class Stream -> java.util.stream.Stream extends BaseStream { +class Stream -> java.util.stream.Stream { boolean allMatch(Predicate) boolean anyMatch(Predicate) Stream.Builder builder() @@ -221,14 +221,14 @@ class Stream -> java.util.stream.Stream extends BaseStream { def[] toArray(IntFunction) } -class Stream.Builder -> java.util.stream.Stream$Builder extends Consumer { +class Stream.Builder -> java.util.stream.Stream$Builder { Stream.Builder add(def) Stream build() } #### Classes -class Collectors -> java.util.stream.Collectors extends Object { +class Collectors -> java.util.stream.Collectors { Collector averagingDouble(ToDoubleFunction) Collector averagingInt(ToIntFunction) Collector averagingLong(ToLongFunction) @@ -264,7 +264,7 @@ class Collectors -> java.util.stream.Collectors extends Object { #### Enums -class Collector.Characteristics -> java.util.stream.Collector$Characteristics extends Enum,Object { +class Collector.Characteristics -> java.util.stream.Collector$Characteristics { Collector.Characteristics CONCURRENT Collector.Characteristics IDENTITY_FINISH Collector.Characteristics UNORDERED diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt 
index ba50a30042cd9..164798e68d325 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt @@ -24,7 +24,7 @@ #### Interfaces -class Collection -> java.util.Collection extends Iterable { +class Collection -> java.util.Collection { boolean add(def) boolean addAll(Collection) void clear() @@ -41,13 +41,13 @@ class Collection -> java.util.Collection extends Iterable { def[] toArray(def[]) # some adaptations of groovy methods - List org.elasticsearch.painless.api.Augmentation.collect(Function) - def org.elasticsearch.painless.api.Augmentation.collect(Collection,Function) - def org.elasticsearch.painless.api.Augmentation.find(Predicate) - List org.elasticsearch.painless.api.Augmentation.findAll(Predicate) - def org.elasticsearch.painless.api.Augmentation.findResult(Function) - def org.elasticsearch.painless.api.Augmentation.findResult(def,Function) - List org.elasticsearch.painless.api.Augmentation.split(Predicate) + List org.elasticsearch.painless.api.Augmentation collect(Function) + def org.elasticsearch.painless.api.Augmentation collect(Collection,Function) + def org.elasticsearch.painless.api.Augmentation find(Predicate) + List org.elasticsearch.painless.api.Augmentation findAll(Predicate) + def org.elasticsearch.painless.api.Augmentation findResult(Function) + def org.elasticsearch.painless.api.Augmentation findResult(def,Function) + List org.elasticsearch.painless.api.Augmentation split(Predicate) } class Comparator -> java.util.Comparator { @@ -70,7 +70,7 @@ class Comparator -> java.util.Comparator { Comparator thenComparingLong(ToLongFunction) } -class Deque -> java.util.Deque extends Queue,Collection,Iterable { +class Deque -> java.util.Deque { void addFirst(def) void addLast(def) Iterator descendingIterator() @@ -110,7 +110,7 @@ class Iterator -> java.util.Iterator { void remove() } -class List -> java.util.List extends 
Collection,Iterable { +class List -> java.util.List { void add(int,def) boolean addAll(int,Collection) boolean equals(Object) @@ -123,12 +123,12 @@ class List -> java.util.List extends Collection,Iterable { def remove(int) void replaceAll(UnaryOperator) def set(int,def) - int org.elasticsearch.painless.api.Augmentation.getLength() + int org.elasticsearch.painless.api.Augmentation getLength() void sort(Comparator) List subList(int,int) } -class ListIterator -> java.util.ListIterator extends Iterator { +class ListIterator -> java.util.ListIterator { void add(def) boolean hasPrevious() int nextIndex() @@ -163,17 +163,17 @@ class Map -> java.util.Map { Collection values() # some adaptations of groovy methods - List org.elasticsearch.painless.api.Augmentation.collect(BiFunction) - def org.elasticsearch.painless.api.Augmentation.collect(Collection,BiFunction) - int org.elasticsearch.painless.api.Augmentation.count(BiPredicate) - def org.elasticsearch.painless.api.Augmentation.each(BiConsumer) - boolean org.elasticsearch.painless.api.Augmentation.every(BiPredicate) - Map.Entry org.elasticsearch.painless.api.Augmentation.find(BiPredicate) - Map org.elasticsearch.painless.api.Augmentation.findAll(BiPredicate) - def org.elasticsearch.painless.api.Augmentation.findResult(BiFunction) - def org.elasticsearch.painless.api.Augmentation.findResult(def,BiFunction) - List org.elasticsearch.painless.api.Augmentation.findResults(BiFunction) - Map org.elasticsearch.painless.api.Augmentation.groupBy(BiFunction) + List org.elasticsearch.painless.api.Augmentation collect(BiFunction) + def org.elasticsearch.painless.api.Augmentation collect(Collection,BiFunction) + int org.elasticsearch.painless.api.Augmentation count(BiPredicate) + def org.elasticsearch.painless.api.Augmentation each(BiConsumer) + boolean org.elasticsearch.painless.api.Augmentation every(BiPredicate) + Map.Entry org.elasticsearch.painless.api.Augmentation find(BiPredicate) + Map org.elasticsearch.painless.api.Augmentation 
findAll(BiPredicate) + def org.elasticsearch.painless.api.Augmentation findResult(BiFunction) + def org.elasticsearch.painless.api.Augmentation findResult(def,BiFunction) + List org.elasticsearch.painless.api.Augmentation findResults(BiFunction) + Map org.elasticsearch.painless.api.Augmentation groupBy(BiFunction) } class Map.Entry -> java.util.Map$Entry { @@ -188,7 +188,7 @@ class Map.Entry -> java.util.Map$Entry { def setValue(def) } -class NavigableMap -> java.util.NavigableMap extends SortedMap,Map { +class NavigableMap -> java.util.NavigableMap { Map.Entry ceilingEntry(def) def ceilingKey(def) NavigableSet descendingKeySet() @@ -208,7 +208,7 @@ class NavigableMap -> java.util.NavigableMap extends SortedMap,Map { NavigableMap tailMap(def,boolean) } -class NavigableSet -> java.util.NavigableSet extends SortedSet,Set,Collection,Iterable { +class NavigableSet -> java.util.NavigableSet { def ceiling(def) Iterator descendingIterator() NavigableSet descendingSet() @@ -226,21 +226,21 @@ class Observer -> java.util.Observer { void update(Observable,Object) } -class PrimitiveIterator -> java.util.PrimitiveIterator extends Iterator { +class PrimitiveIterator -> java.util.PrimitiveIterator { void forEachRemaining(def) } -class PrimitiveIterator.OfDouble -> java.util.PrimitiveIterator$OfDouble extends PrimitiveIterator,Iterator { +class PrimitiveIterator.OfDouble -> java.util.PrimitiveIterator$OfDouble { Double next() double nextDouble() } -class PrimitiveIterator.OfInt -> java.util.PrimitiveIterator$OfInt extends PrimitiveIterator,Iterator { +class PrimitiveIterator.OfInt -> java.util.PrimitiveIterator$OfInt { Integer next() int nextInt() } -class PrimitiveIterator.OfLong -> java.util.PrimitiveIterator$OfLong extends PrimitiveIterator,Iterator { +class PrimitiveIterator.OfLong -> java.util.PrimitiveIterator$OfLong { Long next() long nextLong() } @@ -264,25 +264,25 @@ class Spliterator -> java.util.Spliterator { Spliterator trySplit() } -class Spliterator.OfPrimitive -> 
java.util.Spliterator$OfPrimitive extends Spliterator { +class Spliterator.OfPrimitive -> java.util.Spliterator$OfPrimitive { void forEachRemaining(def) boolean tryAdvance(def) Spliterator.OfPrimitive trySplit() } -class Spliterator.OfDouble -> java.util.Spliterator$OfDouble extends Spliterator.OfPrimitive,Spliterator { +class Spliterator.OfDouble -> java.util.Spliterator$OfDouble { Spliterator.OfDouble trySplit() } -class Spliterator.OfInt -> java.util.Spliterator$OfInt extends Spliterator.OfPrimitive,Spliterator { +class Spliterator.OfInt -> java.util.Spliterator$OfInt { Spliterator.OfInt trySplit() } -class Spliterator.OfLong -> java.util.Spliterator$OfLong extends Spliterator.OfPrimitive,Spliterator { +class Spliterator.OfLong -> java.util.Spliterator$OfLong { Spliterator.OfLong trySplit() } -class Queue -> java.util.Queue extends Collection,Iterable { +class Queue -> java.util.Queue { def element() boolean offer(def) def peek() @@ -293,13 +293,13 @@ class Queue -> java.util.Queue extends Collection,Iterable { class RandomAccess -> java.util.RandomAccess { } -class Set -> java.util.Set extends Collection,Iterable { +class Set -> java.util.Set { boolean equals(Object) int hashCode() boolean remove(def) } -class SortedMap -> java.util.SortedMap extends Map { +class SortedMap -> java.util.SortedMap { Comparator comparator() def firstKey() SortedMap headMap(def) @@ -308,7 +308,7 @@ class SortedMap -> java.util.SortedMap extends Map { SortedMap tailMap(def) } -class SortedSet -> java.util.SortedSet extends Set,Collection,Iterable { +class SortedSet -> java.util.SortedSet { Comparator comparator() def first() SortedSet headSet(def) @@ -319,55 +319,55 @@ class SortedSet -> java.util.SortedSet extends Set,Collection,Iterable { #### Classes -class AbstractCollection -> java.util.AbstractCollection extends Collection,Iterable,Object { +class AbstractCollection -> java.util.AbstractCollection { } -class AbstractList -> java.util.AbstractList extends 
AbstractCollection,List,Collection,Iterable,Object { +class AbstractList -> java.util.AbstractList { } -class AbstractMap -> java.util.AbstractMap extends Map,Object { +class AbstractMap -> java.util.AbstractMap { } -class AbstractMap.SimpleEntry -> java.util.AbstractMap$SimpleEntry extends Map.Entry,Object { - AbstractMap.SimpleEntry (def,def) - AbstractMap.SimpleEntry (Map.Entry) +class AbstractMap.SimpleEntry -> java.util.AbstractMap$SimpleEntry { + (def,def) + (Map.Entry) } -class AbstractMap.SimpleImmutableEntry -> java.util.AbstractMap$SimpleImmutableEntry extends Map.Entry,Object { - AbstractMap.SimpleImmutableEntry (def,def) - AbstractMap.SimpleImmutableEntry (Map.Entry) +class AbstractMap.SimpleImmutableEntry -> java.util.AbstractMap$SimpleImmutableEntry { + (def,def) + (Map.Entry) } -class AbstractQueue -> java.util.AbstractQueue extends AbstractCollection,Queue,Collection,Iterable,Object { +class AbstractQueue -> java.util.AbstractQueue { } -class AbstractSequentialList -> java.util.AbstractSequentialList extends AbstractList,AbstractCollection,List,Collection,Iterable,Object { +class AbstractSequentialList -> java.util.AbstractSequentialList { } -class AbstractSet -> java.util.AbstractSet extends AbstractCollection,Set,Collection,Iterable,Object { +class AbstractSet -> java.util.AbstractSet { } -class ArrayDeque -> java.util.ArrayDeque extends AbstractCollection,Deque,Queue,Collection,Iterable,Object { - ArrayDeque () - ArrayDeque (Collection) +class ArrayDeque -> java.util.ArrayDeque { + () + (Collection) ArrayDeque clone() } -class ArrayList -> java.util.ArrayList extends AbstractList,AbstractCollection,List,RandomAccess,Collection,Iterable,Object { - ArrayList () - ArrayList (Collection) +class ArrayList -> java.util.ArrayList { + () + (Collection) def clone() void trimToSize() } -class Arrays -> java.util.Arrays extends Object { +class Arrays -> java.util.Arrays { List asList(Object[]) boolean deepEquals(Object[],Object[]) int deepHashCode(Object[]) 
String deepToString(Object[]) } -class Base64 -> java.util.Base64 extends Object { +class Base64 -> java.util.Base64 { Base64.Decoder getDecoder() Base64.Encoder getEncoder() Base64.Decoder getMimeDecoder() @@ -377,20 +377,20 @@ class Base64 -> java.util.Base64 extends Object { Base64.Encoder getUrlEncoder() } -class Base64.Decoder -> java.util.Base64$Decoder extends Object { +class Base64.Decoder -> java.util.Base64$Decoder { int decode(byte[],byte[]) byte[] decode(String) } -class Base64.Encoder -> java.util.Base64$Encoder extends Object { +class Base64.Encoder -> java.util.Base64$Encoder { int encode(byte[],byte[]) String encodeToString(byte[]) Base64.Encoder withoutPadding() } -class BitSet -> java.util.BitSet extends Object { - BitSet () - BitSet (int) +class BitSet -> java.util.BitSet { + () + (int) void and(BitSet) void andNot(BitSet) int cardinality() @@ -418,7 +418,7 @@ class BitSet -> java.util.BitSet extends Object { void xor(BitSet) } -class Calendar -> java.util.Calendar extends Comparable,Object { +class Calendar -> java.util.Calendar { int ALL_STYLES int AM int AM_PM @@ -516,8 +516,8 @@ class Calendar -> java.util.Calendar extends Comparable,Object { Instant toInstant() } -class Calendar.Builder -> java.util.Calendar$Builder extends Object { - Calendar.Builder () +class Calendar.Builder -> java.util.Calendar$Builder { + () Calendar build() Calendar.Builder set(int,int) Calendar.Builder setCalendarType(String) @@ -533,7 +533,7 @@ class Calendar.Builder -> java.util.Calendar$Builder extends Object { Calendar.Builder setWeekDefinition(int,int) } -class Collections -> java.util.Collections extends Object { +class Collections -> java.util.Collections { List EMPTY_LIST Map EMPTY_MAP Set EMPTY_SET @@ -588,7 +588,7 @@ class Collections -> java.util.Collections extends Object { SortedSet unmodifiableSortedSet(SortedSet) } -class Currency -> java.util.Currency extends Object { +class Currency -> java.util.Currency { Set getAvailableCurrencies() String 
getCurrencyCode() int getDefaultFractionDigits() @@ -600,9 +600,9 @@ class Currency -> java.util.Currency extends Object { String getSymbol(Locale) } -class Date -> java.util.Date extends Comparable,Object { - Date () - Date (long) +class Date -> java.util.Date { + () + (long) boolean after(Date) boolean before(Date) def clone() @@ -612,7 +612,7 @@ class Date -> java.util.Date extends Comparable,Object { void setTime(long) } -class Dictionary -> java.util.Dictionary extends Object { +class Dictionary -> java.util.Dictionary { Enumeration elements() def get(def) boolean isEmpty() @@ -622,8 +622,8 @@ class Dictionary -> java.util.Dictionary extends Object { int size() } -class DoubleSummaryStatistics -> java.util.DoubleSummaryStatistics extends DoubleConsumer,Object { - DoubleSummaryStatistics () +class DoubleSummaryStatistics -> java.util.DoubleSummaryStatistics { + () void combine(DoubleSummaryStatistics) double getAverage() long getCount() @@ -632,40 +632,40 @@ class DoubleSummaryStatistics -> java.util.DoubleSummaryStatistics extends Doubl double getSum() } -class EventListenerProxy -> java.util.EventListenerProxy extends EventListener,Object { +class EventListenerProxy -> java.util.EventListenerProxy { EventListener getListener() } -class EventObject -> java.util.EventObject extends Object { - EventObject (Object) +class EventObject -> java.util.EventObject { + (Object) Object getSource() } -class FormattableFlags -> java.util.FormattableFlags extends Object { +class FormattableFlags -> java.util.FormattableFlags { int ALTERNATE int LEFT_JUSTIFY int UPPERCASE } -class Formatter -> java.util.Formatter extends Object { - Formatter () - Formatter (Appendable) - Formatter (Appendable,Locale) +class Formatter -> java.util.Formatter { + () + (Appendable) + (Appendable,Locale) Formatter format(Locale,String,def[]) Formatter format(String,def[]) Locale locale() Appendable out() } -class GregorianCalendar -> java.util.GregorianCalendar extends Calendar,Comparable,Object 
{ +class GregorianCalendar -> java.util.GregorianCalendar { int AD int BC - GregorianCalendar () - GregorianCalendar (int,int,int) - GregorianCalendar (int,int,int,int,int) - GregorianCalendar (int,int,int,int,int,int) - GregorianCalendar (TimeZone) - GregorianCalendar (TimeZone,Locale) + () + (int,int,int) + (int,int,int,int,int) + (int,int,int,int,int,int) + (TimeZone) + (TimeZone,Locale) GregorianCalendar from(ZonedDateTime) Date getGregorianChange() boolean isLeapYear(int) @@ -673,32 +673,32 @@ class GregorianCalendar -> java.util.GregorianCalendar extends Calendar,Comparab ZonedDateTime toZonedDateTime() } -class HashMap -> java.util.HashMap extends AbstractMap,Map,Object { - HashMap () - HashMap (Map) +class HashMap -> java.util.HashMap { + () + (Map) def clone() } -class HashSet -> java.util.HashSet extends AbstractSet,Set,Collection,Iterable,Object { - HashSet () - HashSet (Collection) +class HashSet -> java.util.HashSet { + () + (Collection) def clone() } -class Hashtable -> java.util.Hashtable extends Dictionary,Map,Object { - Hashtable () - Hashtable (Map) +class Hashtable -> java.util.Hashtable { + () + (Map) def clone() } -class IdentityHashMap -> java.util.IdentityHashMap extends AbstractMap,Map,Object { - IdentityHashMap () - IdentityHashMap (Map) +class IdentityHashMap -> java.util.IdentityHashMap { + () + (Map) def clone() } -class IntSummaryStatistics -> java.util.IntSummaryStatistics extends IntConsumer,Object { - IntSummaryStatistics () +class IntSummaryStatistics -> java.util.IntSummaryStatistics { + () void combine(IntSummaryStatistics) double getAverage() long getCount() @@ -707,23 +707,23 @@ class IntSummaryStatistics -> java.util.IntSummaryStatistics extends IntConsumer long getSum() } -class LinkedHashMap -> java.util.LinkedHashMap extends HashMap,AbstractMap,Map,Object { - LinkedHashMap () - LinkedHashMap (Map) +class LinkedHashMap -> java.util.LinkedHashMap { + () + (Map) } -class LinkedHashSet -> java.util.LinkedHashSet extends 
HashSet,AbstractSet,Set,AbstractCollection,Collection,Iterable,Object { - LinkedHashSet () - LinkedHashSet (Collection) +class LinkedHashSet -> java.util.LinkedHashSet { + () + (Collection) } -class LinkedList -> java.util.LinkedList extends AbstractSequentialList,AbstractList,List,Deque,Queue,AbstractCollection,Collection,Iterable,Object { - LinkedList () - LinkedList (Collection) +class LinkedList -> java.util.LinkedList { + () + (Collection) def clone() } -class Locale -> java.util.Locale extends Object { +class Locale -> java.util.Locale { Locale CANADA Locale CANADA_FRENCH Locale CHINA @@ -748,9 +748,9 @@ class Locale -> java.util.Locale extends Object { Locale UK char UNICODE_LOCALE_EXTENSION Locale US - Locale (String) - Locale (String,String) - Locale (String,String,String) + (String) + (String,String) + (String,String,String) def clone() List filter(List,Collection) List filterTags(List,Collection) @@ -788,8 +788,8 @@ class Locale -> java.util.Locale extends Object { String toLanguageTag() } -class Locale.Builder -> java.util.Locale$Builder extends Object { - Locale.Builder () +class Locale.Builder -> java.util.Locale$Builder { + () Locale.Builder addUnicodeLocaleAttribute(String) Locale build() Locale.Builder clear() @@ -805,11 +805,11 @@ class Locale.Builder -> java.util.Locale$Builder extends Object { Locale.Builder setVariant(String) } -class Locale.LanguageRange -> java.util.Locale$LanguageRange extends Object { +class Locale.LanguageRange -> java.util.Locale$LanguageRange { double MAX_WEIGHT double MIN_WEIGHT - Locale.LanguageRange (String) - Locale.LanguageRange (String,double) + (String) + (String,double) String getRange() double getWeight() List mapEquivalents(List,Map) @@ -817,8 +817,8 @@ class Locale.LanguageRange -> java.util.Locale$LanguageRange extends Object { List parse(String,Map) } -class LongSummaryStatistics -> java.util.LongSummaryStatistics extends LongConsumer,Object { - LongSummaryStatistics () +class LongSummaryStatistics -> 
java.util.LongSummaryStatistics { + () void combine(LongSummaryStatistics) double getAverage() long getCount() @@ -827,7 +827,7 @@ class LongSummaryStatistics -> java.util.LongSummaryStatistics extends LongConsu long getSum() } -class Objects -> java.util.Objects extends Object { +class Objects -> java.util.Objects { int compare(def,def,Comparator) boolean deepEquals(Object,Object) boolean equals(Object,Object) @@ -841,8 +841,8 @@ class Objects -> java.util.Objects extends Object { String toString(Object,String) } -class Observable -> java.util.Observable extends Object { - Observable () +class Observable -> java.util.Observable { + () void addObserver(Observer) int countObservers() void deleteObserver(Observer) @@ -852,7 +852,7 @@ class Observable -> java.util.Observable extends Object { void notifyObservers(Object) } -class Optional -> java.util.Optional extends Object { +class Optional -> java.util.Optional { Optional empty() Optional filter(Predicate) Optional flatMap(Function) @@ -867,7 +867,7 @@ class Optional -> java.util.Optional extends Object { def orElseThrow(Supplier) } -class OptionalDouble -> java.util.OptionalDouble extends Object { +class OptionalDouble -> java.util.OptionalDouble { OptionalDouble empty() double getAsDouble() void ifPresent(DoubleConsumer) @@ -878,7 +878,7 @@ class OptionalDouble -> java.util.OptionalDouble extends Object { double orElseThrow(Supplier) } -class OptionalInt -> java.util.OptionalInt extends Object { +class OptionalInt -> java.util.OptionalInt { OptionalInt empty() int getAsInt() void ifPresent(IntConsumer) @@ -889,7 +889,7 @@ class OptionalInt -> java.util.OptionalInt extends Object { int orElseThrow(Supplier) } -class OptionalLong -> java.util.OptionalLong extends Object { +class OptionalLong -> java.util.OptionalLong { OptionalLong empty() long getAsLong() void ifPresent(LongConsumer) @@ -900,14 +900,14 @@ class OptionalLong -> java.util.OptionalLong extends Object { long orElseThrow(Supplier) } -class PriorityQueue 
-> java.util.PriorityQueue extends AbstractQueue,Queue,AbstractCollection,Collection,Iterable,Object { - PriorityQueue () - PriorityQueue (Comparator) +class PriorityQueue -> java.util.PriorityQueue { + () + (Comparator) } -class Random -> java.util.Random extends Object { - Random () - Random (long) +class Random -> java.util.Random { + () + (long) DoubleStream doubles(long) DoubleStream doubles(long,double,double) IntStream ints(long) @@ -925,14 +925,14 @@ class Random -> java.util.Random extends Object { void setSeed(long) } -class SimpleTimeZone -> java.util.SimpleTimeZone extends TimeZone,Object { +class SimpleTimeZone -> java.util.SimpleTimeZone { int STANDARD_TIME int UTC_TIME int WALL_TIME - SimpleTimeZone (int,String) - SimpleTimeZone (int,String,int,int,int,int,int,int,int,int) - SimpleTimeZone (int,String,int,int,int,int,int,int,int,int,int) - SimpleTimeZone (int,String,int,int,int,int,int,int,int,int,int,int,int) + (int,String) + (int,String,int,int,int,int,int,int,int,int) + (int,String,int,int,int,int,int,int,int,int,int) + (int,String,int,int,int,int,int,int,int,int,int,int,int) int getDSTSavings() void setDSTSavings(int) void setEndRule(int,int,int) @@ -944,7 +944,7 @@ class SimpleTimeZone -> java.util.SimpleTimeZone extends TimeZone,Object { void setStartYear(int) } -class Spliterators -> java.util.Spliterators extends Object { +class Spliterators -> java.util.Spliterators { Spliterator.OfDouble emptyDoubleSpliterator() Spliterator.OfInt emptyIntSpliterator() Spliterator.OfLong emptyLongSpliterator() @@ -955,8 +955,8 @@ class Spliterators -> java.util.Spliterators extends Object { Spliterator spliteratorUnknownSize(Iterator,int) } -class Stack -> java.util.Stack extends Vector,AbstractList,List,AbstractCollection,Collection,Iterable,RandomAccess,Object { - Stack () +class Stack -> java.util.Stack { + () def push(def) def pop() def peek() @@ -964,26 +964,26 @@ class Stack -> java.util.Stack extends Vector,AbstractList,List,AbstractCollecti int 
search(def) } -class StringJoiner -> java.util.StringJoiner extends Object { - StringJoiner (CharSequence) - StringJoiner (CharSequence,CharSequence,CharSequence) +class StringJoiner -> java.util.StringJoiner { + (CharSequence) + (CharSequence,CharSequence,CharSequence) StringJoiner add(CharSequence) int length() StringJoiner merge(StringJoiner) StringJoiner setEmptyValue(CharSequence) } -class StringTokenizer -> java.util.StringTokenizer extends Enumeration,Object { - StringTokenizer (String) - StringTokenizer (String,String) - StringTokenizer (String,String,boolean) +class StringTokenizer -> java.util.StringTokenizer { + (String) + (String,String) + (String,String,boolean) int countTokens() boolean hasMoreTokens() String nextToken() String nextToken(String) } -class TimeZone -> java.util.TimeZone extends Object { +class TimeZone -> java.util.TimeZone { int LONG int SHORT def clone() @@ -1008,20 +1008,20 @@ class TimeZone -> java.util.TimeZone extends Object { boolean useDaylightTime() } -class TreeMap -> java.util.TreeMap extends AbstractMap,NavigableMap,SortedMap,Map,Object { - TreeMap () - TreeMap (Comparator) +class TreeMap -> java.util.TreeMap { + () + (Comparator) def clone() } -class TreeSet -> java.util.TreeSet extends AbstractSet,NavigableSet,SortedSet,Set,AbstractCollection,Collection,Iterable,Object { - TreeSet () - TreeSet (Comparator) +class TreeSet -> java.util.TreeSet { + () + (Comparator) def clone() } -class UUID -> java.util.UUID extends Comparable,Object { - UUID (long,long) +class UUID -> java.util.UUID { + (long,long) int compareTo(UUID) int clockSequence() UUID fromString(String) @@ -1034,9 +1034,9 @@ class UUID -> java.util.UUID extends Comparable,Object { int version() } -class Vector -> java.util.Vector extends AbstractList,List,AbstractCollection,Collection,Iterable,RandomAccess,Object { - Vector () - Vector (Collection) +class Vector -> java.util.Vector { + () + (Collection) void addElement(def) void copyInto(Object[]) def elementAt(int) 
@@ -1054,19 +1054,19 @@ class Vector -> java.util.Vector extends AbstractList,List,AbstractCollection,Co #### Enums -class Formatter.BigDecimalLayoutForm -> java.util.Formatter$BigDecimalLayoutForm extends Enum,Comparable,Object { +class Formatter.BigDecimalLayoutForm -> java.util.Formatter$BigDecimalLayoutForm { Formatter.BigDecimalLayoutForm DECIMAL_FLOAT Formatter.BigDecimalLayoutForm SCIENTIFIC } -class Locale.Category -> java.util.Locale$Category extends Enum,Comparable,Object { +class Locale.Category -> java.util.Locale$Category { Locale.Category DISPLAY Locale.Category FORMAT Locale.Category valueOf(String) Locale.Category[] values() } -class Locale.FilteringMode -> java.util.Locale$FilteringMode extends Enum,Comparable,Object { +class Locale.FilteringMode -> java.util.Locale$FilteringMode { Locale.FilteringMode AUTOSELECT_FILTERING Locale.FilteringMode EXTENDED_FILTERING Locale.FilteringMode IGNORE_EXTENDED_RANGES @@ -1078,101 +1078,101 @@ class Locale.FilteringMode -> java.util.Locale$FilteringMode extends Enum,Compar #### Exceptions -class ConcurrentModificationException -> java.util.ConcurrentModificationException extends RuntimeException,Exception,Object { - ConcurrentModificationException () - ConcurrentModificationException (String) +class ConcurrentModificationException -> java.util.ConcurrentModificationException { + () + (String) } -class DuplicateFormatFlagsException -> java.util.DuplicateFormatFlagsException extends IllegalFormatException,IllegalArgumentException,RuntimeException,Exception,Object { - DuplicateFormatFlagsException (String) +class DuplicateFormatFlagsException -> java.util.DuplicateFormatFlagsException { + (String) String getFlags() } -class EmptyStackException -> java.util.EmptyStackException extends RuntimeException,Exception,Object { - EmptyStackException () +class EmptyStackException -> java.util.EmptyStackException { + () } -class FormatFlagsConversionMismatchException -> java.util.FormatFlagsConversionMismatchException 
extends IllegalFormatException,IllegalArgumentException,RuntimeException,Exception,Object { - FormatFlagsConversionMismatchException (String,char) +class FormatFlagsConversionMismatchException -> java.util.FormatFlagsConversionMismatchException { + (String,char) char getConversion() String getFlags() } -class FormatterClosedException -> java.util.FormatterClosedException extends IllegalStateException,RuntimeException,Exception,Object { - FormatterClosedException () +class FormatterClosedException -> java.util.FormatterClosedException { + () } -class IllegalFormatCodePointException -> java.util.IllegalFormatCodePointException extends IllegalFormatException,IllegalArgumentException,RuntimeException,Exception,Object { - IllegalFormatCodePointException (int) +class IllegalFormatCodePointException -> java.util.IllegalFormatCodePointException { + (int) int getCodePoint() } -class IllegalFormatConversionException -> java.util.IllegalFormatConversionException extends IllegalFormatException,IllegalArgumentException,RuntimeException,Exception,Object { +class IllegalFormatConversionException -> java.util.IllegalFormatConversionException { char getConversion() } -class IllegalFormatException -> java.util.IllegalFormatException extends IllegalArgumentException,RuntimeException,Exception,Object { +class IllegalFormatException -> java.util.IllegalFormatException { } -class IllegalFormatFlagsException -> java.util.IllegalFormatFlagsException extends IllegalFormatException,IllegalArgumentException,RuntimeException,Exception,Object { - IllegalFormatFlagsException (String) +class IllegalFormatFlagsException -> java.util.IllegalFormatFlagsException { + (String) String getFlags() } -class IllegalFormatPrecisionException -> java.util.IllegalFormatPrecisionException extends IllegalFormatException,IllegalArgumentException,RuntimeException,Exception,Object { - IllegalFormatPrecisionException (int) +class IllegalFormatPrecisionException -> java.util.IllegalFormatPrecisionException { + (int) 
int getPrecision() } -class IllegalFormatWidthException -> java.util.IllegalFormatWidthException extends IllegalFormatException,IllegalArgumentException,RuntimeException,Exception,Object { - IllegalFormatWidthException (int) +class IllegalFormatWidthException -> java.util.IllegalFormatWidthException { + (int) int getWidth() } -class IllformedLocaleException -> java.util.IllformedLocaleException extends RuntimeException,Exception,Object { - IllformedLocaleException () - IllformedLocaleException (String) - IllformedLocaleException (String,int) +class IllformedLocaleException -> java.util.IllformedLocaleException { + () + (String) + (String,int) int getErrorIndex() } -class InputMismatchException -> java.util.InputMismatchException extends NoSuchElementException,RuntimeException,Exception,Object { - InputMismatchException () - InputMismatchException (String) +class InputMismatchException -> java.util.InputMismatchException { + () + (String) } -class MissingFormatArgumentException -> java.util.MissingFormatArgumentException extends IllegalFormatException,IllegalArgumentException,RuntimeException,Exception,Object { - MissingFormatArgumentException (String) +class MissingFormatArgumentException -> java.util.MissingFormatArgumentException { + (String) String getFormatSpecifier() } -class MissingFormatWidthException -> java.util.MissingFormatWidthException extends IllegalFormatException,IllegalArgumentException,RuntimeException,Exception,Object { - MissingFormatWidthException (String) +class MissingFormatWidthException -> java.util.MissingFormatWidthException { + (String) String getFormatSpecifier() } -class MissingResourceException -> java.util.MissingResourceException extends RuntimeException,Exception,Object { - MissingResourceException (String,String,String) +class MissingResourceException -> java.util.MissingResourceException { + (String,String,String) String getClassName() String getKey() } -class NoSuchElementException -> java.util.NoSuchElementException extends 
RuntimeException,Exception,Object { - NoSuchElementException () - NoSuchElementException (String) +class NoSuchElementException -> java.util.NoSuchElementException { + () + (String) } -class TooManyListenersException -> java.util.TooManyListenersException extends Exception,Object { - TooManyListenersException () - TooManyListenersException (String) +class TooManyListenersException -> java.util.TooManyListenersException { + () + (String) } -class UnknownFormatConversionException -> java.util.UnknownFormatConversionException extends IllegalFormatException,IllegalArgumentException,RuntimeException,Exception,Object { - UnknownFormatConversionException (String) +class UnknownFormatConversionException -> java.util.UnknownFormatConversionException { + (String) String getConversion() } -class UnknownFormatFlagsException -> java.util.UnknownFormatFlagsException extends IllegalFormatException,IllegalArgumentException,RuntimeException,Exception,Object { - UnknownFormatFlagsException (String) +class UnknownFormatFlagsException -> java.util.UnknownFormatFlagsException { + (String) String getFlags() } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/joda.time.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/joda.time.txt index 02d959215345e..6899e2878680c 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/joda.time.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/joda.time.txt @@ -26,7 +26,7 @@ # convenient access via the scripting API. 
classes are fully qualified to avoid # any confusion with java.time -class org.joda.time.ReadableInstant -> org.joda.time.ReadableInstant extends Comparable { +class org.joda.time.ReadableInstant -> org.joda.time.ReadableInstant { boolean equals(Object) long getMillis() int hashCode() @@ -36,7 +36,7 @@ class org.joda.time.ReadableInstant -> org.joda.time.ReadableInstant extends Com String toString() } -class org.joda.time.ReadableDateTime -> org.joda.time.ReadableDateTime extends org.joda.time.ReadableInstant,Comparable { +class org.joda.time.ReadableDateTime -> org.joda.time.ReadableDateTime { int getCenturyOfEra() int getDayOfMonth() int getDayOfWeek() diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt index 8f9b0413c4131..5ff486b0b81f2 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt @@ -51,33 +51,26 @@ class float -> float { class double -> double { } -class def -> java.lang.Object { - boolean equals(Object) - int hashCode() - String toString() -} - - #### Painless debugging API -class Debug -> org.elasticsearch.painless.api.Debug extends Object { +class Debug -> org.elasticsearch.painless.api.Debug { void explain(Object) } #### ES Scripting API -class org.elasticsearch.common.geo.GeoPoint -> org.elasticsearch.common.geo.GeoPoint extends Object { +class org.elasticsearch.common.geo.GeoPoint -> org.elasticsearch.common.geo.GeoPoint { double getLat() double getLon() } -class org.elasticsearch.index.fielddata.ScriptDocValues.Strings -> org.elasticsearch.index.fielddata.ScriptDocValues$Strings extends List,Collection,Iterable,Object { +class org.elasticsearch.index.fielddata.ScriptDocValues.Strings -> org.elasticsearch.index.fielddata.ScriptDocValues$Strings { String get(int) 
String getValue() List getValues() } -class org.elasticsearch.index.fielddata.ScriptDocValues.Longs -> org.elasticsearch.index.fielddata.ScriptDocValues$Longs extends List,Collection,Iterable,Object { +class org.elasticsearch.index.fielddata.ScriptDocValues.Longs -> org.elasticsearch.index.fielddata.ScriptDocValues$Longs { Long get(int) long getValue() List getValues() @@ -85,7 +78,7 @@ class org.elasticsearch.index.fielddata.ScriptDocValues.Longs -> org.elasticsear List getDates() } -class org.elasticsearch.index.fielddata.ScriptDocValues.Dates -> org.elasticsearch.index.fielddata.ScriptDocValues$Dates extends List,Collection,Iterable,Object { +class org.elasticsearch.index.fielddata.ScriptDocValues.Dates -> org.elasticsearch.index.fielddata.ScriptDocValues$Dates { org.joda.time.ReadableDateTime get(int) org.joda.time.ReadableDateTime getValue() List getValues() @@ -93,13 +86,13 @@ class org.elasticsearch.index.fielddata.ScriptDocValues.Dates -> org.elasticsear List getDates() } -class org.elasticsearch.index.fielddata.ScriptDocValues.Doubles -> org.elasticsearch.index.fielddata.ScriptDocValues$Doubles extends List,Collection,Iterable,Object { +class org.elasticsearch.index.fielddata.ScriptDocValues.Doubles -> org.elasticsearch.index.fielddata.ScriptDocValues$Doubles { Double get(int) double getValue() List getValues() } -class org.elasticsearch.index.fielddata.ScriptDocValues.GeoPoints -> org.elasticsearch.index.fielddata.ScriptDocValues$GeoPoints extends List,Collection,Iterable,Object { +class org.elasticsearch.index.fielddata.ScriptDocValues.GeoPoints -> org.elasticsearch.index.fielddata.ScriptDocValues$GeoPoints { org.elasticsearch.common.geo.GeoPoint get(int) org.elasticsearch.common.geo.GeoPoint getValue() List getValues() @@ -117,19 +110,19 @@ class org.elasticsearch.index.fielddata.ScriptDocValues.GeoPoints -> org.elastic double geohashDistanceWithDefault(String,double) } -class org.elasticsearch.index.fielddata.ScriptDocValues.Booleans -> 
org.elasticsearch.index.fielddata.ScriptDocValues$Booleans extends List,Collection,Iterable,Object { +class org.elasticsearch.index.fielddata.ScriptDocValues.Booleans -> org.elasticsearch.index.fielddata.ScriptDocValues$Booleans { Boolean get(int) boolean getValue() List getValues() } -class org.elasticsearch.index.fielddata.ScriptDocValues.BytesRefs -> org.elasticsearch.index.fielddata.ScriptDocValues$BytesRefs extends List,Collection,Iterable,Object { +class org.elasticsearch.index.fielddata.ScriptDocValues.BytesRefs -> org.elasticsearch.index.fielddata.ScriptDocValues$BytesRefs { BytesRef get(int) BytesRef getValue() List getValues() } -class BytesRef -> org.apache.lucene.util.BytesRef extends Object { +class BytesRef -> org.apache.lucene.util.BytesRef { byte[] bytes int offset int length @@ -137,7 +130,7 @@ class BytesRef -> org.apache.lucene.util.BytesRef extends Object { String utf8ToString() } -class org.elasticsearch.index.mapper.IpFieldMapper.IpFieldType.IpScriptDocValues -> org.elasticsearch.index.mapper.IpFieldMapper$IpFieldType$IpScriptDocValues extends List,Collection,Iterable,Object { +class org.elasticsearch.index.mapper.IpFieldMapper.IpFieldType.IpScriptDocValues -> org.elasticsearch.index.mapper.IpFieldMapper$IpFieldType$IpScriptDocValues { String get(int) String getValue() List getValues() @@ -145,9 +138,9 @@ class org.elasticsearch.index.mapper.IpFieldMapper.IpFieldType.IpScriptDocValues # for testing. 
# currently FeatureTest exposes overloaded constructor, field load store, and overloaded static methods -class org.elasticsearch.painless.FeatureTest -> org.elasticsearch.painless.FeatureTest extends Object { - org.elasticsearch.painless.FeatureTest () - org.elasticsearch.painless.FeatureTest (int,int) +class org.elasticsearch.painless.FeatureTest -> org.elasticsearch.painless.FeatureTest { + () + (int,int) int getX() int getY() void setX(int) @@ -156,32 +149,32 @@ class org.elasticsearch.painless.FeatureTest -> org.elasticsearch.painless.Featu boolean overloadedStatic(boolean) Object twoFunctionsOfX(Function,Function) void listInput(List) - int org.elasticsearch.painless.FeatureTestAugmentation.getTotal() - int org.elasticsearch.painless.FeatureTestAugmentation.addToTotal(int) + int org.elasticsearch.painless.FeatureTestAugmentation getTotal() + int org.elasticsearch.painless.FeatureTestAugmentation addToTotal(int) } -class org.elasticsearch.search.lookup.FieldLookup -> org.elasticsearch.search.lookup.FieldLookup extends Object { +class org.elasticsearch.search.lookup.FieldLookup -> org.elasticsearch.search.lookup.FieldLookup { def getValue() List getValues() boolean isEmpty() } -class org.elasticsearch.index.similarity.ScriptedSimilarity.Query -> org.elasticsearch.index.similarity.ScriptedSimilarity$Query extends Object { +class org.elasticsearch.index.similarity.ScriptedSimilarity.Query -> org.elasticsearch.index.similarity.ScriptedSimilarity$Query { float getBoost() } -class org.elasticsearch.index.similarity.ScriptedSimilarity.Field -> org.elasticsearch.index.similarity.ScriptedSimilarity$Field extends Object { +class org.elasticsearch.index.similarity.ScriptedSimilarity.Field -> org.elasticsearch.index.similarity.ScriptedSimilarity$Field { long getDocCount() long getSumDocFreq() long getSumTotalTermFreq() } -class org.elasticsearch.index.similarity.ScriptedSimilarity.Term -> org.elasticsearch.index.similarity.ScriptedSimilarity$Term extends Object { +class 
org.elasticsearch.index.similarity.ScriptedSimilarity.Term -> org.elasticsearch.index.similarity.ScriptedSimilarity$Term { long getDocFreq() long getTotalTermFreq() } -class org.elasticsearch.index.similarity.ScriptedSimilarity.Doc -> org.elasticsearch.index.similarity.ScriptedSimilarity$Doc extends Object { +class org.elasticsearch.index.similarity.ScriptedSimilarity.Doc -> org.elasticsearch.index.similarity.ScriptedSimilarity$Doc { int getLength() float getFreq() } From d05aee7eda3a28b11df47e652f44f7d2b185c97d Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 17 Jul 2017 15:24:43 +0200 Subject: [PATCH 60/67] inner hits: Do not allow inner hits that use _source and have a non nested object field as parent Closes #25315 --- .../index/cache/bitset/BitsetFilterCache.java | 2 +- .../index/mapper/DocumentMapper.java | 15 --------- .../index/mapper/ObjectMapper.java | 29 +++++++++++++++++ .../index/query/NestedQueryBuilder.java | 5 +++ .../search/fetch/FetchPhase.java | 9 +++--- .../index/mapper/NestedObjectMapperTests.java | 32 +++++++++++++++++++ .../search/fetch/subphase/InnerHitsIT.java | 23 +++++++++++-- 7 files changed, 91 insertions(+), 24 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 7d3e75f6f5d12..2de8f01dc7153 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -239,7 +239,7 @@ public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, fin hasNested = true; for (ObjectMapper objectMapper : docMapper.objectMappers().values()) { if (objectMapper.nested().isNested()) { - ObjectMapper parentObjectMapper = docMapper.findParentObjectMapper(objectMapper); + ObjectMapper parentObjectMapper = objectMapper.getParentObjectMapper(mapperService); if (parentObjectMapper != 
null && parentObjectMapper.nested().isNested()) { warmUp.add(parentObjectMapper.nestedTypeFilter()); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 1dfb7b19eb259..c4de559d1d956 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -292,21 +292,6 @@ public ObjectMapper findNestedObjectMapper(int nestedDocId, SearchContext sc, Le return nestedObjectMapper; } - /** - * Returns the parent {@link ObjectMapper} instance of the specified object mapper or null if there - * isn't any. - */ - // TODO: We should add: ObjectMapper#getParentObjectMapper() - public ObjectMapper findParentObjectMapper(ObjectMapper objectMapper) { - int indexOfLastDot = objectMapper.fullPath().lastIndexOf('.'); - if (indexOfLastDot != -1) { - String parentNestObjectPath = objectMapper.fullPath().substring(0, indexOfLastDot); - return objectMappers().get(parentNestObjectPath); - } else { - return null; - } - } - public boolean isParent(String type) { return mapperService.getParentTypes().contains(type); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index fe592835f97a2..d83ce173d6896 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -396,6 +396,35 @@ public final Dynamic dynamic() { return dynamic; } + /** + * Returns the parent {@link ObjectMapper} instance of the specified object mapper or null if there + * isn't any. 
+ */ + public ObjectMapper getParentObjectMapper(MapperService mapperService) { + int indexOfLastDot = fullPath().lastIndexOf('.'); + if (indexOfLastDot != -1) { + String parentNestObjectPath = fullPath().substring(0, indexOfLastDot); + return mapperService.getObjectMapper(parentNestObjectPath); + } else { + return null; + } + } + + /** + * Returns whether all parent objects fields are nested too. + */ + public boolean parentObjectMapperAreNested(MapperService mapperService) { + for (ObjectMapper parent = getParentObjectMapper(mapperService); + parent != null; + parent = parent.getParentObjectMapper(mapperService)) { + + if (parent.nested().isNested() == false) { + return false; + } + } + return true; + } + @Override public ObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) { if (!(mergeWith instanceof ObjectMapper)) { diff --git a/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 4e3429e1a2088..9efd86748831c 100644 --- a/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -353,6 +353,11 @@ protected void doBuild(SearchContext parentSearchContext, name, parentSearchContext, parentObjectMapper, nestedObjectMapper ); setupInnerHitsContext(queryShardContext, nestedInnerHits); + if ((nestedInnerHits.hasFetchSourceContext() == false || nestedInnerHits.sourceRequested()) && + nestedObjectMapper.parentObjectMapperAreNested(parentSearchContext.mapperService()) == false) { + throw new IllegalArgumentException("Cannot execute inner hits. One or more parent object fields of nested field [" + + nestedObjectMapper.name() + "] are not nested. 
All parent fields need to be nested fields too"); + } queryShardContext.nestedScope().previousLevel(); innerHitsContext.addInnerHitDefinition(nestedInnerHits); } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 8892a69f2dfc1..9126b0eaaec1e 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -40,6 +40,7 @@ import org.elasticsearch.index.fieldvisitor.FieldsVisitor; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.Uid; @@ -246,7 +247,7 @@ private SearchHit createNestedSearchHit(SearchContext context, int nestedTopDocI ObjectMapper nestedObjectMapper = documentMapper.findNestedObjectMapper(nestedSubDocId, context, subReaderContext); assert nestedObjectMapper != null; SearchHit.NestedIdentity nestedIdentity = - getInternalNestedIdentity(context, nestedSubDocId, subReaderContext, documentMapper, nestedObjectMapper); + getInternalNestedIdentity(context, nestedSubDocId, subReaderContext, context.mapperService(), nestedObjectMapper); if (source != null) { Tuple> tuple = XContentHelper.convertToMap(source, true); @@ -311,9 +312,7 @@ private Map getSearchFields(SearchContext context, int ne return searchFields; } - private SearchHit.NestedIdentity getInternalNestedIdentity(SearchContext context, int nestedSubDocId, - LeafReaderContext subReaderContext, DocumentMapper documentMapper, - ObjectMapper nestedObjectMapper) throws IOException { + private SearchHit.NestedIdentity getInternalNestedIdentity(SearchContext context, int nestedSubDocId, LeafReaderContext subReaderContext, MapperService mapperService, ObjectMapper 
nestedObjectMapper) throws IOException { int currentParent = nestedSubDocId; ObjectMapper nestedParentObjectMapper; ObjectMapper current = nestedObjectMapper; @@ -321,7 +320,7 @@ private SearchHit.NestedIdentity getInternalNestedIdentity(SearchContext context SearchHit.NestedIdentity nestedIdentity = null; do { Query parentFilter; - nestedParentObjectMapper = documentMapper.findParentObjectMapper(current); + nestedParentObjectMapper = current.getParentObjectMapper(mapperService); if (nestedParentObjectMapper != null) { if (nestedParentObjectMapper.nested().isNested() == false) { current = nestedParentObjectMapper; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index 157033d414884..a3b477a4b6f25 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -36,6 +36,7 @@ import java.util.Collections; import java.util.function.Function; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -428,4 +429,35 @@ public void testLimitOfNestedFieldsWithMultiTypePerIndex() throws Exception { createIndex("test5", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build()) .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_RECOVERY, false); } + + public void testParentObjectMapperAreNested() throws Exception { + MapperService mapperService = createIndex("index1", Settings.EMPTY, "doc", jsonBuilder().startObject() + .startObject("properties") + .startObject("comments") + .field("type", "nested") + .startObject("properties") + .startObject("messages") + .field("type", 
"nested").endObject() + .endObject() + .endObject() + .endObject() + .endObject()).mapperService(); + ObjectMapper objectMapper = mapperService.getObjectMapper("comments.messages"); + assertTrue(objectMapper.parentObjectMapperAreNested(mapperService)); + + mapperService = createIndex("index2", Settings.EMPTY, "doc", jsonBuilder().startObject() + .startObject("properties") + .startObject("comments") + .field("type", "object") + .startObject("properties") + .startObject("messages") + .field("type", "nested").endObject() + .endObject() + .endObject() + .endObject() + .endObject()).mapperService(); + objectMapper = mapperService.getObjectMapper("comments.messages"); + assertFalse(objectMapper.parentObjectMapperAreNested(mapperService)); + } + } diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java index 079db8097f2b0..902747b35a887 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java @@ -411,9 +411,26 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { .endObject())); indexRandom(true, requests); + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch("articles").setQuery(nestedQuery("comments.messages", + matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder())).get() + ); + assertEquals("Cannot execute inner hits. One or more parent object fields of nested field [comments.messages] are " + + "not nested. 
All parent fields need to be nested fields too", e.shardFailures()[0].getCause().getMessage()); + + e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch("articles").setQuery(nestedQuery("comments.messages", + matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder() + .setFetchSourceContext(new FetchSourceContext(true)))).get() + ); + assertEquals("Cannot execute inner hits. One or more parent object fields of nested field [comments.messages] are " + + "not nested. All parent fields need to be nested fields too", e.shardFailures()[0].getCause().getMessage()); + SearchResponse response = client().prepareSearch("articles") .setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg) - .innerHit(new InnerHitBuilder())).get(); + .innerHit(new InnerHitBuilder().setFetchSourceContext(new FetchSourceContext(false)))).get(); assertNoFailures(response); assertHitCount(response, 1); SearchHit hit = response.getHits().getAt(0); @@ -427,7 +444,7 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { response = client().prepareSearch("articles") .setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "bear"), ScoreMode.Avg) - .innerHit(new InnerHitBuilder())).get(); + .innerHit(new InnerHitBuilder().setFetchSourceContext(new FetchSourceContext(false)))).get(); assertNoFailures(response); assertHitCount(response, 1); hit = response.getHits().getAt(0); @@ -448,7 +465,7 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { indexRandom(true, requests); response = client().prepareSearch("articles") .setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg) - .innerHit(new InnerHitBuilder())).get(); + .innerHit(new InnerHitBuilder().setFetchSourceContext(new FetchSourceContext(false)))).get(); assertNoFailures(response); 
assertHitCount(response, 1); hit = response.getHits().getAt(0);; From f782f618cc798f25343ec5f00bef3141d53b7f82 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 31 Jul 2017 14:47:30 +0200 Subject: [PATCH 61/67] Moved the check to fetch phase. This basically means that we throw a better error message instead of an AOBE and not adding more restrictions. --- .../index/query/NestedQueryBuilder.java | 5 -- .../search/fetch/FetchPhase.java | 13 +++-- .../search/fetch/subphase/InnerHitsIT.java | 47 ++++++++++--------- 3 files changed, 35 insertions(+), 30 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 9efd86748831c..4e3429e1a2088 100644 --- a/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -353,11 +353,6 @@ protected void doBuild(SearchContext parentSearchContext, name, parentSearchContext, parentObjectMapper, nestedObjectMapper ); setupInnerHitsContext(queryShardContext, nestedInnerHits); - if ((nestedInnerHits.hasFetchSourceContext() == false || nestedInnerHits.sourceRequested()) && - nestedObjectMapper.parentObjectMapperAreNested(parentSearchContext.mapperService()) == false) { - throw new IllegalArgumentException("Cannot execute inner hits. One or more parent object fields of nested field [" + - nestedObjectMapper.name() + "] are not nested. 
All parent fields need to be nested fields too"); - } queryShardContext.nestedScope().previousLevel(); innerHitsContext.addInnerHitDefinition(nestedInnerHits); } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 9126b0eaaec1e..8ec4df4688b63 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -263,18 +263,23 @@ private SearchHit createNestedSearchHit(SearchContext context, int nestedTopDocI String nestedPath = nested.getField().string(); current.put(nestedPath, new HashMap<>()); Object extractedValue = XContentMapValues.extractValue(nestedPath, sourceAsMap); - List> nestedParsedSource; + List nestedParsedSource; if (extractedValue instanceof List) { // nested field has an array value in the _source - nestedParsedSource = (List>) extractedValue; + nestedParsedSource = (List) extractedValue; } else if (extractedValue instanceof Map) { // nested field has an object value in the _source. This just means the nested field has just one inner object, // which is valid, but uncommon. - nestedParsedSource = Collections.singletonList((Map) extractedValue); + nestedParsedSource = Collections.singletonList(extractedValue); } else { throw new IllegalStateException("extracted source isn't an object or an array"); } - sourceAsMap = nestedParsedSource.get(nested.getOffset()); + if ((nestedParsedSource.get(0) instanceof Map) == false && + nestedObjectMapper.parentObjectMapperAreNested(context.mapperService()) == false) { + throw new IllegalArgumentException("Cannot execute inner hits. One or more parent object fields of nested field [" + + nestedObjectMapper.name() + "] are not nested. 
All parent fields need to be nested fields too"); + } + sourceAsMap = (Map) nestedParsedSource.get(nested.getOffset()); if (nested.getChild() == null) { current.put(nestedPath, sourceAsMap); } else { diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java index 902747b35a887..e596ee206d19b 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java @@ -402,33 +402,34 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { List requests = new ArrayList<>(); requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject() .field("title", "quick brown fox") - .startObject("comments") - .startArray("messages") - .startObject().field("message", "fox eat quick").endObject() - .startObject().field("message", "bear eat quick").endObject() + .startArray("comments") + .startObject() + .startArray("messages") + .startObject().field("message", "fox eat quick").endObject() + .startObject().field("message", "bear eat quick").endObject() + .endArray() + .endObject() + .startObject() + .startArray("messages") + .startObject().field("message", "no fox").endObject() + .endArray() + .endObject() .endArray() - .endObject() .endObject())); indexRandom(true, requests); - SearchPhaseExecutionException e = expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch("articles").setQuery(nestedQuery("comments.messages", - matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder())).get() - ); + SearchResponse response = client().prepareSearch("articles").setQuery(nestedQuery("comments.messages", + matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder())).get(); assertEquals("Cannot execute inner hits. 
One or more parent object fields of nested field [comments.messages] are " + - "not nested. All parent fields need to be nested fields too", e.shardFailures()[0].getCause().getMessage()); + "not nested. All parent fields need to be nested fields too", response.getShardFailures()[0].getCause().getMessage()); - e = expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch("articles").setQuery(nestedQuery("comments.messages", - matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder() - .setFetchSourceContext(new FetchSourceContext(true)))).get() - ); + response = client().prepareSearch("articles").setQuery(nestedQuery("comments.messages", + matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder() + .setFetchSourceContext(new FetchSourceContext(true)))).get(); assertEquals("Cannot execute inner hits. One or more parent object fields of nested field [comments.messages] are " + - "not nested. All parent fields need to be nested fields too", e.shardFailures()[0].getCause().getMessage()); + "not nested. 
All parent fields need to be nested fields too", response.getShardFailures()[0].getCause().getMessage()); - SearchResponse response = client().prepareSearch("articles") + response = client().prepareSearch("articles") .setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg) .innerHit(new InnerHitBuilder().setFetchSourceContext(new FetchSourceContext(false)))).get(); assertNoFailures(response); @@ -436,11 +437,15 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { SearchHit hit = response.getHits().getAt(0); assertThat(hit.getId(), equalTo("1")); SearchHits messages = hit.getInnerHits().get("comments.messages"); - assertThat(messages.getTotalHits(), equalTo(1L)); + assertThat(messages.getTotalHits(), equalTo(2L)); assertThat(messages.getAt(0).getId(), equalTo("1")); assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); - assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(2)); assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); + assertThat(messages.getAt(1).getId(), equalTo("1")); + assertThat(messages.getAt(1).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(messages.getAt(1).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(messages.getAt(1).getNestedIdentity().getChild(), nullValue()); response = client().prepareSearch("articles") .setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "bear"), ScoreMode.Avg) From a3a6ce6220ac271ce538d7c92b8a9a6d7c982631 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 31 Jul 2017 15:21:50 +0200 Subject: [PATCH 62/67] fix line length violation --- .../main/java/org/elasticsearch/search/fetch/FetchPhase.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git 
a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 8ec4df4688b63..736e84d09af5e 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -317,7 +317,10 @@ private Map getSearchFields(SearchContext context, int ne return searchFields; } - private SearchHit.NestedIdentity getInternalNestedIdentity(SearchContext context, int nestedSubDocId, LeafReaderContext subReaderContext, MapperService mapperService, ObjectMapper nestedObjectMapper) throws IOException { + private SearchHit.NestedIdentity getInternalNestedIdentity(SearchContext context, int nestedSubDocId, + LeafReaderContext subReaderContext, + MapperService mapperService, + ObjectMapper nestedObjectMapper) throws IOException { int currentParent = nestedSubDocId; ObjectMapper nestedParentObjectMapper; ObjectMapper current = nestedObjectMapper; From 6c46a67dd6ddc2f3c8ddda9a311aef96da83d4f3 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 9 Aug 2017 08:58:36 +0200 Subject: [PATCH 63/67] added comment --- .../main/java/org/elasticsearch/search/fetch/FetchPhase.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 736e84d09af5e..f05e38636a0cd 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -276,6 +276,11 @@ private SearchHit createNestedSearchHit(SearchContext context, int nestedTopDocI } if ((nestedParsedSource.get(0) instanceof Map) == false && nestedObjectMapper.parentObjectMapperAreNested(context.mapperService()) == false) { + // When one of the parent objects are not nested then XContentMapValues.extractValue(...) 
extracts the values + // from two or more layers resulting in a list of list being returned. This is because nestedPath + // encapsulates two or more object layers in the _source. + // + // This is why only the first element of nestedParsedSource needs to be checked. throw new IllegalArgumentException("Cannot execute inner hits. One or more parent object fields of nested field [" + nestedObjectMapper.name() + "] are not nested. All parent fields need to be nested fields too"); } From 2db3bccd3789ec0648037dd39bab5c7aa5ab8299 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 19 Sep 2017 10:02:05 +0000 Subject: [PATCH 64/67] Invalid JSON request body caused endless loop (#26680) Request bodys that only consists of a String value can lead to endless loops in the parser of several rest requests like e.g. `_count`. Up to 5.2 this seems to have been caught in the logic guessing the content type of the request, but since then it causes the node to block. This change introduces checks for receiving a valid xContent object before starting the parsing in RestActions#parseTopLevelQueryBuilder(). 
Closes #26083 --- .../rest/action/RestActions.java | 9 ++++++ .../rest/action/RestActionsTests.java | 32 ++++++++++++++++--- 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestActions.java b/core/src/main/java/org/elasticsearch/rest/action/RestActions.java index 61e3ded6456b6..759cd4a773dd9 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/RestActions.java +++ b/core/src/main/java/org/elasticsearch/rest/action/RestActions.java @@ -243,6 +243,15 @@ public RestResponse buildResponse(NodesResponse response, XContentBuilder builde private static QueryBuilder parseTopLevelQueryBuilder(XContentParser parser) { try { QueryBuilder queryBuilder = null; + XContentParser.Token first = parser.nextToken(); + if (first == null) { + return null; + } else if (first != XContentParser.Token.START_OBJECT) { + throw new ParsingException( + parser.getTokenLocation(), "Expected [" + XContentParser.Token.START_OBJECT + + "] but found [" + first + "]", parser.getTokenLocation() + ); + } for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) { if (token == XContentParser.Token.FIELD_NAME) { String fieldName = parser.currentName(); diff --git a/core/src/test/java/org/elasticsearch/rest/action/RestActionsTests.java b/core/src/test/java/org/elasticsearch/rest/action/RestActionsTests.java index 7272243e3cf93..401cc79b02092 100644 --- a/core/src/test/java/org/elasticsearch/rest/action/RestActionsTests.java +++ b/core/src/test/java/org/elasticsearch/rest/action/RestActionsTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.rest.action; +import com.fasterxml.jackson.core.io.JsonEOFException; +import java.util.Arrays; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -59,10 +61,32 @@ public void testParseTopLevelBuilder() throws 
IOException { } public void testParseTopLevelBuilderEmptyObject() throws IOException { - String requestBody = "{}"; - try (XContentParser parser = createParser(JsonXContent.jsonXContent, requestBody)) { - QueryBuilder query = RestActions.getQueryContent(parser); - assertNull(query); + for (String requestBody : Arrays.asList("{}", "")) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, requestBody)) { + QueryBuilder query = RestActions.getQueryContent(parser); + assertNull(query); + } + } + } + + public void testParseTopLevelBuilderMalformedJson() throws IOException { + for (String requestBody : Arrays.asList("\"\"", "\"someString\"", "\"{\"")) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, requestBody)) { + ParsingException exception = + expectThrows(ParsingException.class, () -> RestActions.getQueryContent(parser)); + assertEquals("Expected [START_OBJECT] but found [VALUE_STRING]", exception.getMessage()); + } + } + } + + public void testParseTopLevelBuilderIncompleteJson() throws IOException { + for (String requestBody : Arrays.asList("{", "{ \"query\" :")) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, requestBody)) { + ParsingException exception = + expectThrows(ParsingException.class, () -> RestActions.getQueryContent(parser)); + assertEquals("Failed to parse", exception.getMessage()); + assertEquals(JsonEOFException.class, exception.getRootCause().getClass()); + } } } From 256721018bd3a8be4a069646473e0bf0c7c84bac Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 19 Sep 2017 06:27:56 -0400 Subject: [PATCH 65/67] Move pre-6.0 node checkpoint to SequenceNumbers This commit moves the pre-6.0 node checkpoint constant from SequenceNumbersService to SequenceNumbers so it can chill with the other sequence number-related constants. 
Relates #26690 --- .../TransportResyncReplicationAction.java | 4 ++-- .../TransportReplicationAction.java | 5 ++-- .../seqno/GlobalCheckpointSyncAction.java | 2 +- .../index/seqno/GlobalCheckpointTracker.java | 24 +++++++++---------- .../index/seqno/SequenceNumbers.java | 4 ++++ .../index/seqno/SequenceNumbersService.java | 5 ---- 6 files changed, 21 insertions(+), 23 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/core/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index 514cbca04cc7c..d217717faeba0 100644 --- a/core/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -32,7 +32,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.PrimaryReplicaSyncer; import org.elasticsearch.index.translog.Translog; @@ -93,7 +93,7 @@ protected void sendReplicaRequest( if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { super.sendReplicaRequest(replicaRequest, node, listener); } else { - final long pre60NodeCheckpoint = SequenceNumbersService.PRE_60_NODE_CHECKPOINT; + final long pre60NodeCheckpoint = SequenceNumbers.PRE_60_NODE_CHECKPOINT; listener.onResponse(new ReplicaResponse(pre60NodeCheckpoint, pre60NodeCheckpoint)); } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 15fc8075fa108..516554d92e8cd 100644 --- 
a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -54,7 +54,6 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ReplicationGroup; @@ -1055,12 +1054,12 @@ public void readFrom(StreamInput in) throws IOException { localCheckpoint = in.readZLong(); } else { // 5.x used to read empty responses, which don't really read anything off the stream, so just do nothing. - localCheckpoint = SequenceNumbersService.PRE_60_NODE_CHECKPOINT; + localCheckpoint = SequenceNumbers.PRE_60_NODE_CHECKPOINT; } if (in.getVersion().onOrAfter(Version.V_6_0_0_rc1)) { globalCheckpoint = in.readZLong(); } else { - globalCheckpoint = SequenceNumbersService.PRE_60_NODE_CHECKPOINT; + globalCheckpoint = SequenceNumbers.PRE_60_NODE_CHECKPOINT; } } diff --git a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java index e89d231ae6eb0..e6d8b8e8d3ff8 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java @@ -89,7 +89,7 @@ protected void sendReplicaRequest( if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { super.sendReplicaRequest(replicaRequest, node, listener); } else { - final long pre60NodeCheckpoint = SequenceNumbersService.PRE_60_NODE_CHECKPOINT; + final long pre60NodeCheckpoint = SequenceNumbers.PRE_60_NODE_CHECKPOINT; listener.onResponse(new ReplicaResponse(pre60NodeCheckpoint, pre60NodeCheckpoint)); } } 
diff --git a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java index ee1f2d7c8715a..8d00723a4b859 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java @@ -230,7 +230,7 @@ private boolean invariant() { // local checkpoints only set during primary mode assert primaryMode || checkpoints.values().stream() .allMatch(lcps -> lcps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO || - lcps.localCheckpoint == SequenceNumbersService.PRE_60_NODE_CHECKPOINT); + lcps.localCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT); // global checkpoints for other shards only set during primary mode assert primaryMode @@ -241,7 +241,7 @@ private boolean invariant() { .map(Map.Entry::getValue) .allMatch(cps -> (cps.globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO - || cps.globalCheckpoint == SequenceNumbersService.PRE_60_NODE_CHECKPOINT)); + || cps.globalCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT)); // relocation handoff can only occur in primary mode assert !handoffInProgress || primaryMode; @@ -316,7 +316,7 @@ private static long inSyncCheckpointStates( .stream() .filter(cps -> cps.inSync) .mapToLong(function) - .filter(v -> v != SequenceNumbersService.PRE_60_NODE_CHECKPOINT && v != SequenceNumbers.UNASSIGNED_SEQ_NO)); + .filter(v -> v != SequenceNumbers.PRE_60_NODE_CHECKPOINT && v != SequenceNumbers.UNASSIGNED_SEQ_NO)); return value.isPresent() ? value.getAsLong() : SequenceNumbers.UNASSIGNED_SEQ_NO; } @@ -473,7 +473,7 @@ public synchronized void updateFromMaster(final long applyingClusterStateVersion assert inSync == false : "update from master in primary mode has " + initializingId + " as in-sync but it does not exist locally"; final long localCheckpoint = pre60AllocationIds.contains(initializingId) ? 
- SequenceNumbersService.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; + SequenceNumbers.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, inSync)); } @@ -482,7 +482,7 @@ public synchronized void updateFromMaster(final long applyingClusterStateVersion for (String initializingId : initializingAllocationIds) { if (shardAllocationId.equals(initializingId) == false) { final long localCheckpoint = pre60AllocationIds.contains(initializingId) ? - SequenceNumbersService.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; + SequenceNumbers.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, false)); } @@ -493,7 +493,7 @@ public synchronized void updateFromMaster(final long applyingClusterStateVersion checkpoints.get(shardAllocationId).inSync = true; } else { final long localCheckpoint = pre60AllocationIds.contains(inSyncId) ? 
- SequenceNumbersService.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; + SequenceNumbers.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; checkpoints.put(inSyncId, new CheckpointState(localCheckpoint, globalCheckpoint, true)); } @@ -576,8 +576,8 @@ public synchronized void markAllocationIdAsInSync(final String allocationId, fin private boolean updateLocalCheckpoint(String allocationId, CheckpointState cps, long localCheckpoint) { // a local checkpoint of PRE_60_NODE_CHECKPOINT cannot be overridden - assert cps.localCheckpoint != SequenceNumbersService.PRE_60_NODE_CHECKPOINT || - localCheckpoint == SequenceNumbersService.PRE_60_NODE_CHECKPOINT : + assert cps.localCheckpoint != SequenceNumbers.PRE_60_NODE_CHECKPOINT || + localCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT : "pre-6.0 shard copy " + allocationId + " unexpected to send valid local checkpoint " + localCheckpoint; // a local checkpoint for a shard copy should be a valid sequence number or the pre-6.0 sequence number indicator assert localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO : @@ -640,7 +640,7 @@ private static long computeGlobalCheckpoint(final Set pendingInSync, fin if (cps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { // unassigned in-sync replica return fallback; - } else if (cps.localCheckpoint == SequenceNumbersService.PRE_60_NODE_CHECKPOINT) { + } else if (cps.localCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT) { // 5.x replica, ignore for global checkpoint calculation } else { minLocalCheckpoint = Math.min(cps.localCheckpoint, minLocalCheckpoint); @@ -713,13 +713,13 @@ public synchronized void completeRelocationHandoff() { checkpoints.entrySet().stream().forEach(e -> { final CheckpointState cps = e.getValue(); if (cps.localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && - cps.localCheckpoint != SequenceNumbersService.PRE_60_NODE_CHECKPOINT) { + cps.localCheckpoint != 
SequenceNumbers.PRE_60_NODE_CHECKPOINT) { cps.localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; } if (e.getKey().equals(shardAllocationId) == false) { // don't throw global checkpoint information of current shard away if (cps.globalCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && - cps.globalCheckpoint != SequenceNumbersService.PRE_60_NODE_CHECKPOINT) { + cps.globalCheckpoint != SequenceNumbers.PRE_60_NODE_CHECKPOINT) { cps.globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; } } @@ -763,7 +763,7 @@ private Runnable getMasterUpdateOperationFromCurrentState() { if (entry.getValue().inSync) { inSyncAllocationIds.add(entry.getKey()); } - if (entry.getValue().getLocalCheckpoint() == SequenceNumbersService.PRE_60_NODE_CHECKPOINT) { + if (entry.getValue().getLocalCheckpoint() == SequenceNumbers.PRE_60_NODE_CHECKPOINT) { pre60AllocationIds.add(entry.getKey()); } }); diff --git a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java index cf878f613a710..21b4134f9837e 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java @@ -28,6 +28,10 @@ public class SequenceNumbers { public static final String LOCAL_CHECKPOINT_KEY = "local_checkpoint"; public static final String MAX_SEQ_NO = "max_seq_no"; + /** + * Represents a checkpoint coming from a pre-6.0 node + */ + public static final long PRE_60_NODE_CHECKPOINT = -3L; /** * Represents an unassigned sequence number (e.g., can be used on primary operations before they are executed). 
*/ diff --git a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java index fa0d0bc9b34b5..760fbe0a5fc07 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java @@ -33,11 +33,6 @@ */ public class SequenceNumbersService extends AbstractIndexShardComponent { - /** - * Represents a local checkpoint coming from a pre-6.0 node - */ - public static final long PRE_60_NODE_CHECKPOINT = -3L; - private final LocalCheckpointTracker localCheckpointTracker; private final GlobalCheckpointTracker globalCheckpointTracker; From 332b4d12fa9cb25ad62620d5009ccd3321177d78 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 19 Sep 2017 15:13:45 +0200 Subject: [PATCH 66/67] test: Use a single primary shard so that the exception can caught in the same way --- .../search/fetch/subphase/InnerHitsIT.java | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java index e596ee206d19b..f20387b47db1b 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexSettings; @@ -386,6 +387,9 @@ public void testNestedDefinedAsObject() throws Exception { public void 
testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { assertAcked(prepareCreate("articles") + // number_of_shards = 1, because then we catch the expected exception in the same way. + // (See expectThrows(...) below) + .setSettings(Settings.builder().put("index.number_of_shards", 1)) .addMapping("article", jsonBuilder().startObject() .startObject("properties") .startObject("comments") @@ -418,18 +422,18 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { .endObject())); indexRandom(true, requests); - SearchResponse response = client().prepareSearch("articles").setQuery(nestedQuery("comments.messages", - matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder())).get(); + Exception e = expectThrows(Exception.class, () -> client().prepareSearch("articles").setQuery(nestedQuery("comments.messages", + matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder())).get()); assertEquals("Cannot execute inner hits. One or more parent object fields of nested field [comments.messages] are " + - "not nested. All parent fields need to be nested fields too", response.getShardFailures()[0].getCause().getMessage()); + "not nested. All parent fields need to be nested fields too", e.getCause().getCause().getMessage()); - response = client().prepareSearch("articles").setQuery(nestedQuery("comments.messages", + e = expectThrows(Exception.class, () -> client().prepareSearch("articles").setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder() - .setFetchSourceContext(new FetchSourceContext(true)))).get(); + .setFetchSourceContext(new FetchSourceContext(true)))).get()); assertEquals("Cannot execute inner hits. One or more parent object fields of nested field [comments.messages] are " + - "not nested. 
All parent fields need to be nested fields too", response.getShardFailures()[0].getCause().getMessage()); + "not nested. All parent fields need to be nested fields too", e.getCause().getCause().getMessage()); - response = client().prepareSearch("articles") + SearchResponse response = client().prepareSearch("articles") .setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg) .innerHit(new InnerHitBuilder().setFetchSourceContext(new FetchSourceContext(false)))).get(); assertNoFailures(response); From 04385a9ce914125a3eab626c657ef60d026cde1f Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Tue, 19 Sep 2017 15:58:36 +0200 Subject: [PATCH 67/67] Restoring from snapshot should force generation of a new history uuid (#26694) Restoring a shard from snapshot throws the primary back in time violating assumptions and bringing the validity of global checkpoints in question. To avoid problems, we should make sure that a shard that was restored will never be the source of an ops based recovery to a shard that existed before the restore. To this end we have introduced the notion of `histroy_uuid` in #26577 and required that both source and target will have the same history to allow ops based recoveries. This PR make sure that a shard gets a new uuid after restore. As suggested by @ywelsch , I derived the creation of a `history_uuid` from the `RecoverySource` of the shard. Store recovery will only generate a uuid if it doesn't already exist (we can make this stricter when we don't need to deal with 5.x indices). Peer recovery follows the same logic (note that this is different than the approach in #26557, I went this way as it means that shards always have a history uuid after being recovered on a 6.x node and will also mean that a rolling restart is enough for old indices to step over to the new seq no model). Local shards and snapshot force the generation of a new translog uuid. 
Relates #10708 Closes #26544 --- .../index/engine/EngineConfig.java | 16 ++- .../index/engine/InternalEngine.java | 71 ++++++----- .../elasticsearch/index/shard/IndexShard.java | 16 ++- .../index/shard/StoreRecovery.java | 5 +- .../index/engine/InternalEngineTests.java | 62 +++++++-- .../index/shard/RefreshListenersTests.java | 2 +- .../SharedClusterSnapshotRestoreIT.java | 34 ++++- .../elasticsearch/upgrades/RecoveryIT.java | 119 ++++++++++++++++++ 8 files changed, 273 insertions(+), 52 deletions(-) create mode 100644 qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 66911ab80c723..fbc87f2279b3d 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -71,6 +71,7 @@ public final class EngineConfig { private final List refreshListeners; @Nullable private final Sort indexSort; + private final boolean forceNewHistoryUUID; private final TranslogRecoveryRunner translogRecoveryRunner; /** @@ -115,8 +116,9 @@ public EngineConfig(OpenMode openMode, ShardId shardId, String allocationId, Thr MergePolicy mergePolicy, Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.EventListener eventListener, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, - TranslogConfig translogConfig, TimeValue flushMergesAfter, List refreshListeners, - Sort indexSort, TranslogRecoveryRunner translogRecoveryRunner) { + boolean forceNewHistoryUUID, TranslogConfig translogConfig, TimeValue flushMergesAfter, + List refreshListeners, Sort indexSort, + TranslogRecoveryRunner translogRecoveryRunner) { if (openMode == null) { throw new IllegalArgumentException("openMode must not be null"); } @@ -141,6 +143,7 @@ public EngineConfig(OpenMode openMode, ShardId shardId, String allocationId, Thr 
this.translogConfig = translogConfig; this.flushMergesAfter = flushMergesAfter; this.openMode = openMode; + this.forceNewHistoryUUID = forceNewHistoryUUID; this.refreshListeners = refreshListeners; this.indexSort = indexSort; this.translogRecoveryRunner = translogRecoveryRunner; @@ -300,6 +303,15 @@ public OpenMode getOpenMode() { return openMode; } + + /** + * Returns true if a new history uuid must be generated. If false, a new uuid will only be generated if no existing + * one is found. + */ + public boolean getForceNewHistoryUUID() { + return forceNewHistoryUUID; + } + @FunctionalInterface public interface TranslogRecoveryRunner { int run(Engine engine, Translog.Snapshot snapshot) throws IOException; diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index d7cf3e16069e1..3655a2096ddd4 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -177,23 +177,15 @@ public InternalEngine(EngineConfig engineConfig) throws EngineException { switch (openMode) { case OPEN_INDEX_AND_TRANSLOG: writer = createWriter(false); - String existingHistoryUUID = loadHistoryUUIDFromCommit(writer); - if (existingHistoryUUID == null) { - historyUUID = UUIDs.randomBase64UUID(); - } else { - historyUUID = existingHistoryUUID; - } final long globalCheckpoint = Translog.readGlobalCheckpoint(engineConfig.getTranslogConfig().getTranslogPath()); seqNoStats = store.loadSeqNoStats(globalCheckpoint); break; case OPEN_INDEX_CREATE_TRANSLOG: writer = createWriter(false); - historyUUID = loadHistoryUUIDFromCommit(writer); seqNoStats = store.loadSeqNoStats(SequenceNumbers.UNASSIGNED_SEQ_NO); break; case CREATE_INDEX_AND_TRANSLOG: writer = createWriter(true); - historyUUID = UUIDs.randomBase64UUID(); seqNoStats = new SeqNoStats( SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, 
@@ -205,9 +197,13 @@ public InternalEngine(EngineConfig engineConfig) throws EngineException { logger.trace("recovered [{}]", seqNoStats); seqNoService = sequenceNumberService(shardId, allocationId, engineConfig.getIndexSettings(), seqNoStats); updateMaxUnsafeAutoIdTimestampFromWriter(writer); + historyUUID = loadOrGenerateHistoryUUID(writer, engineConfig.getForceNewHistoryUUID()); + Objects.requireNonNull(historyUUID, "history uuid should not be null"); indexWriter = writer; translog = openTranslog(engineConfig, writer, translogDeletionPolicy, () -> seqNoService().getGlobalCheckpoint()); assert translog.getGeneration() != null; + this.translog = translog; + updateWriterOnOpen(); } catch (IOException | TranslogCorruptedException e) { throw new EngineCreationFailureException(shardId, "failed to create engine", e); } catch (AssertionError e) { @@ -219,8 +215,6 @@ public InternalEngine(EngineConfig engineConfig) throws EngineException { throw e; } } - - this.translog = translog; manager = createSearcherManager(); this.searcherManager = manager; this.versionMap.setManager(searcherManager); @@ -375,24 +369,32 @@ private Translog openTranslog(EngineConfig engineConfig, IndexWriter writer, Tra throw new IndexFormatTooOldException("translog", "translog has no generation nor a UUID - this might be an index from a previous version consider upgrading to N-1 first"); } } - final Translog translog = new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier); - if (translogUUID == null) { - assert openMode != EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG : "OpenMode must not be " - + EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG; - boolean success = false; - try { - commitIndexWriter(writer, translog, openMode == EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG - ? 
commitDataAsMap(writer).get(SYNC_COMMIT_ID) : null); - success = true; - } finally { - if (success == false) { - IOUtils.closeWhileHandlingException(translog); - } - } + return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier); + } + + /** If needed, updates the metadata in the index writer to match the potentially new translog and history uuid */ + private void updateWriterOnOpen() throws IOException { + Objects.requireNonNull(historyUUID); + final Map commitUserData = commitDataAsMap(indexWriter); + boolean needsCommit = false; + if (historyUUID.equals(commitUserData.get(HISTORY_UUID_KEY)) == false) { + needsCommit = true; + } else { + assert config().getForceNewHistoryUUID() == false : "config forced a new history uuid but it didn't change"; + assert openMode != EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG : "new index but it already has an existing history uuid"; + } + if (translog.getTranslogUUID().equals(commitUserData.get(Translog.TRANSLOG_UUID_KEY)) == false) { + needsCommit = true; + } else { + assert openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG : "translog uuid didn't change but open mode is " + openMode; + } + if (needsCommit) { + commitIndexWriter(indexWriter, translog, openMode == EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG + ? commitUserData.get(SYNC_COMMIT_ID) : null); } - return translog; } + @Override public Translog getTranslog() { ensureOpen(); @@ -424,14 +426,17 @@ private String loadTranslogUUIDFromCommit(IndexWriter writer) throws IOException } /** - * Reads the current stored history ID from the IW commit data. If the id is not found, returns null. + * Reads the current stored history ID from the IW commit data. Generates a new UUID if not found or if generation is forced. 
*/ - @Nullable - private String loadHistoryUUIDFromCommit(final IndexWriter writer) throws IOException { + private String loadOrGenerateHistoryUUID(final IndexWriter writer, boolean forceNew) throws IOException { String uuid = commitDataAsMap(writer).get(HISTORY_UUID_KEY); - if (uuid == null) { - assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1) : - "index was created after 6_0_0_rc1 but has no history uuid"; + if (uuid == null || forceNew) { + assert + forceNew || // recovery from a local store creates an index that doesn't have yet a history_uuid + openMode == EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG || + config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1) : + "existing index was created after 6_0_0_rc1 but has no history uuid"; + uuid = UUIDs.randomBase64UUID(); } return uuid; } @@ -1923,9 +1928,7 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl } commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(seqNoService().getMaxSeqNo())); commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get())); - if (historyUUID != null) { - commitData.put(HISTORY_UUID_KEY, historyUUID); - } + commitData.put(HISTORY_UUID_KEY, historyUUID); logger.trace("committing writer with commit data [{}]", commitData); return commitData.entrySet().iterator(); }); diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 24ad4cdb1b85c..f4a771a3b3f4f 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -2099,10 +2099,24 @@ private DocumentMapperForType docMapper(String type) { private EngineConfig newEngineConfig(EngineConfig.OpenMode openMode) { Sort indexSort = indexSortSupplier.get(); + final boolean forceNewHistoryUUID; + switch 
(shardRouting.recoverySource().getType()) { + case EXISTING_STORE: + case PEER: + forceNewHistoryUUID = false; + break; + case EMPTY_STORE: + case SNAPSHOT: + case LOCAL_SHARDS: + forceNewHistoryUUID = true; + break; + default: + throw new AssertionError("unknown recovery type: [" + shardRouting.recoverySource().getType() + "]"); + } return new EngineConfig(openMode, shardId, shardRouting.allocationId().getId(), threadPool, indexSettings, warmer, store, indexSettings.getMergePolicy(), mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, - indexCache.query(), cachingPolicy, translogConfig, + indexCache.query(), cachingPolicy, forceNewHistoryUUID, translogConfig, IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), Arrays.asList(refreshListeners, new RefreshMetricUpdater(refreshMetric)), indexSort, this::runTranslogRecovery); diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 63b7bc0805581..e5053fc7882e0 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -35,12 +35,10 @@ import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.mapper.MapperService; @@ -164,11 +162,10 @@ void addIndices( * document-level semantics. 
*/ writer.setLiveCommitData(() -> { - final HashMap liveCommitData = new HashMap<>(4); + final HashMap liveCommitData = new HashMap<>(3); liveCommitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); liveCommitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(maxSeqNo)); liveCommitData.put(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp)); - liveCommitData.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); return liveCommitData.entrySet().iterator(); }); writer.commit(); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 8b78227ca3121..5971cd3877493 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.engine; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -270,8 +269,8 @@ public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode, An return new EngineConfig(openMode, config.getShardId(), config.getAllocationId(), config.getThreadPool(), config.getIndexSettings(), config.getWarmer(), config.getStore(), config.getMergePolicy(), analyzer, config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), - config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners(), config.getIndexSort(), - config.getTranslogRecoveryRunner()); + config.getForceNewHistoryUUID(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners(), + config.getIndexSort(), config.getTranslogRecoveryRunner()); } @Override @@ -452,7 +451,7 @@ public void 
onFailedEngine(String reason, @Nullable Exception e) { refreshListener == null ? emptyList() : Collections.singletonList(refreshListener); EngineConfig config = new EngineConfig(openMode, shardId, allocationId.getId(), threadPool, indexSettings, null, store, mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, - IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, + IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), false, translogConfig, TimeValue.timeValueMinutes(5), refreshListenerList, indexSort, handler); return config; @@ -2796,8 +2795,8 @@ public void testRecoverFromForeignTranslog() throws IOException { EngineConfig brokenConfig = new EngineConfig(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, shardId, allocationId.getId(), threadPool, config.getIndexSettings(), null, store, newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), config.getRefreshListeners(), - null, config.getTranslogRecoveryRunner()); + IndexSearcher.getDefaultQueryCachingPolicy(), false, translogConfig, TimeValue.timeValueMinutes(5), + config.getRefreshListeners(), null, config.getTranslogRecoveryRunner()); try { InternalEngine internalEngine = new InternalEngine(brokenConfig); @@ -2809,7 +2808,7 @@ public void testRecoverFromForeignTranslog() throws IOException { assertVisibleCount(engine, numDocs, false); } - public void testRecoverFromStoreSetsHistoryUUIDIfNeeded() throws IOException { + public void testHistoryUUIDIsSetIfMissing() throws IOException { final int numDocs = randomIntBetween(0, 3); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); @@ -2842,11 +2841,56 @@ 
public void testRecoverFromStoreSetsHistoryUUIDIfNeeded() throws IOException { .put(defaultSettings.getSettings()) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_0_0_beta1) .build()); - engine = createEngine(indexSettings, store, primaryTranslogDir, newMergePolicy(), null); - assertVisibleCount(engine, numDocs, false); + + EngineConfig config = engine.config(); + + EngineConfig newConfig = new EngineConfig( + randomBoolean() ? EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG : EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG, + shardId, allocationId.getId(), + threadPool, indexSettings, null, store, newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), + new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), false, config.getTranslogConfig(), TimeValue.timeValueMinutes(5), + config.getRefreshListeners(), null, config.getTranslogRecoveryRunner()); + engine = new InternalEngine(newConfig); + if (newConfig.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { + engine.recoverFromTranslog(); + assertVisibleCount(engine, numDocs, false); + } else { + assertVisibleCount(engine, 0, false); + } assertThat(engine.getHistoryUUID(), notNullValue()); } + public void testHistoryUUIDCanBeForced() throws IOException { + final int numDocs = randomIntBetween(0, 3); + for (int i = 0; i < numDocs; i++) { + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); + Engine.IndexResult index = engine.index(firstIndexRequest); + assertThat(index.getVersion(), equalTo(1L)); + } + assertVisibleCount(engine, numDocs); + final String oldHistoryUUID = engine.getHistoryUUID(); + engine.close(); + EngineConfig config = 
engine.config(); + + EngineConfig newConfig = new EngineConfig( + randomBoolean() ? EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG : EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG, + shardId, allocationId.getId(), + threadPool, config.getIndexSettings(), null, store, newMergePolicy(), config.getAnalyzer(), config.getSimilarity(), + new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), true, config.getTranslogConfig(), TimeValue.timeValueMinutes(5), + config.getRefreshListeners(), null, config.getTranslogRecoveryRunner()); + engine = new InternalEngine(newConfig); + if (newConfig.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { + engine.recoverFromTranslog(); + assertVisibleCount(engine, numDocs, false); + } else { + assertVisibleCount(engine, 0, false); + } + assertThat(engine.getHistoryUUID(), not(equalTo(oldHistoryUUID))); + } + public void testShardNotAvailableExceptionWhenEngineClosedConcurrently() throws IOException, InterruptedException { AtomicReference exception = new AtomicReference<>(); String operation = randomFrom("optimize", "refresh", "flush"); diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 01893a99ae4e3..1f24d0b079dd1 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -119,7 +119,7 @@ public void onFailedEngine(String reason, @Nullable Exception e) { }; EngineConfig config = new EngineConfig(EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, shardId, allocationId, threadPool, indexSettings, null, store, newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), - eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), 
translogConfig, + eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), false, translogConfig, TimeValue.timeValueMinutes(5), Collections.singletonList(listeners), null, null); engine = new InternalEngine(config); listeners.setTranslog(engine.getTranslog()); diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 601ca1b8210d3..e2edb33fafa3f 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -36,6 +36,7 @@ import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.ingest.DeletePipelineRequest; @@ -66,6 +67,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidIndexNameException; @@ -119,6 +121,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; @@ -170,8 +173,23 @@ public void testBasicWorkFlow() 
throws Exception { flushResponseFuture = client.admin().indices().prepareFlush(indices).execute(); } } + + final String[] indicesToSnapshot = {"test-idx-*", "-test-idx-3"}; + + logger.info("--> capturing history UUIDs"); + final Map historyUUIDs = new HashMap<>(); + for (ShardStats shardStats: client().admin().indices().prepareStats(indicesToSnapshot).clear().get().getShards()) { + String historyUUID = shardStats.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY); + ShardId shardId = shardStats.getShardRouting().shardId(); + if (historyUUIDs.containsKey(shardId)) { + assertThat(shardStats.getShardRouting() + " has a different history uuid", historyUUID, equalTo(historyUUIDs.get(shardId))); + } else { + historyUUIDs.put(shardId, historyUUID); + } + } + logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get(); + CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices(indicesToSnapshot).get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); @@ -211,6 +229,13 @@ public void testBasicWorkFlow() throws Exception { assertHitCount(client.prepareSearch("test-idx-3").setSize(0).get(), 50L); } + for (ShardStats shardStats: client().admin().indices().prepareStats(indicesToSnapshot).clear().get().getShards()) { + String historyUUID = shardStats.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY); + ShardId shardId = shardStats.getShardRouting().shardId(); + assertThat(shardStats.getShardRouting() + " doesn't have a history uuid", historyUUID, notNullValue()); + assertThat(shardStats.getShardRouting() + " doesn't have a 
new history", historyUUID, not(equalTo(historyUUIDs.get(shardId)))); + } + // Test restore after index deletion logger.info("--> delete indices"); cluster().wipeIndices("test-idx-1", "test-idx-2"); @@ -226,6 +251,13 @@ public void testBasicWorkFlow() throws Exception { assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true)); assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false)); + for (ShardStats shardStats: client().admin().indices().prepareStats(indicesToSnapshot).clear().get().getShards()) { + String historyUUID = shardStats.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY); + ShardId shardId = shardStats.getShardRouting().shardId(); + assertThat(shardStats.getShardRouting() + " doesn't have a history uuid", historyUUID, notNullValue()); + assertThat(shardStats.getShardRouting() + " doesn't have a new history", historyUUID, not(equalTo(historyUUIDs.get(shardId)))); + } + if (flushResponseFuture != null) { // Finish flush flushResponseFuture.actionGet(); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java new file mode 100644 index 0000000000000..26a192cce9e38 --- /dev/null +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -0,0 +1,119 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.upgrades; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Response; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; + +public class RecoveryIT extends ESRestTestCase { + + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @Override + protected boolean preserveReposUponCompletion() { + return true; + } + + private enum CLUSTER_TYPE { + OLD, + MIXED, + UPGRADED; + + public static CLUSTER_TYPE parse(String value) { + switch (value) { + case "old_cluster": + return OLD; + case "mixed_cluster": + return MIXED; + case "upgraded_cluster": + return UPGRADED; + default: + throw new AssertionError("unknown cluster type: " + value); + } + } + } + + private final CLUSTER_TYPE clusterType = CLUSTER_TYPE.parse(System.getProperty("tests.rest.suite")); + + private void assertOK(Response response) { + assertThat(response.getStatusLine().getStatusCode(), anyOf(equalTo(200), equalTo(201))); + } + 
+ private void ensureGreen() throws IOException { + Map params = new HashMap<>(); + params.put("wait_for_status", "green"); + params.put("wait_for_no_relocating_shards", "true"); + assertOK(client().performRequest("GET", "_cluster/health", params)); + } + + private void createIndex(String name, Settings settings) throws IOException { + assertOK(client().performRequest("PUT", name, Collections.emptyMap(), + new StringEntity("{ \"settings\": " + Strings.toString(settings) + " }", ContentType.APPLICATION_JSON))); + } + + + public void testHistoryUUIDIsGenerated() throws Exception { + final String index = "index_history_uuid"; + if (clusterType == CLUSTER_TYPE.OLD) { + Settings.Builder settings = Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1); + createIndex(index, settings.build()); + } else if (clusterType == CLUSTER_TYPE.UPGRADED) { + ensureGreen(); + Response response = client().performRequest("GET", index + "/_stats", Collections.singletonMap("level", "shards")); + assertOK(response); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + List shardStats = objectPath.evaluate("indices." + index + ".shards.0"); + assertThat(shardStats, hasSize(2)); + String expectHistoryUUID = null; + for (int shard = 0; shard < 2; shard++) { + String nodeID = objectPath.evaluate("indices." + index + ".shards.0." + shard + ".routing.node"); + String historyUUID = objectPath.evaluate("indices." + index + ".shards.0." + shard + ".commit.user_data.history_uuid"); + assertThat("no history uuid found for shard on " + nodeID, historyUUID, notNullValue()); + if (expectHistoryUUID == null) { + expectHistoryUUID = historyUUID; + } else { + assertThat("different history uuid found for shard on " + nodeID, historyUUID, equalTo(expectHistoryUUID)); + } + } + } + } + +}