diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index ccd8fea012035..678155c656170 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -16,7 +16,6 @@ - @@ -428,7 +427,6 @@ - diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index fa9980977f4f1..a354bdfb7ba5a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -371,14 +371,14 @@ public void clearScrollAsync(ClearScrollRequest clearScrollRequest, ActionListen listener, emptySet(), headers); } - private Resp performRequestAndParseEntity(Req request, + protected Resp performRequestAndParseEntity(Req request, CheckedFunction requestConverter, CheckedFunction entityParser, Set ignores, Header... headers) throws IOException { return performRequest(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), ignores, headers); } - Resp performRequest(Req request, + protected Resp performRequest(Req request, CheckedFunction requestConverter, CheckedFunction responseConverter, Set ignores, Header... headers) throws IOException { @@ -408,7 +408,7 @@ Resp performRequest(Req request, } } - private void performRequestAsyncAndParseEntity(Req request, + protected void performRequestAsyncAndParseEntity(Req request, CheckedFunction requestConverter, CheckedFunction entityParser, ActionListener listener, Set ignores, Header... headers) { @@ -416,7 +416,7 @@ private void performRequestAsyncAndParseEntity listener, ignores, headers); } - void performRequestAsync(Req request, + protected void performRequestAsync(Req request, CheckedFunction requestConverter, CheckedFunction responseConverter, ActionListener listener, Set ignores, Header... headers) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java new file mode 100644 index 0000000000000..8ad42c2232020 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java @@ -0,0 +1,181 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.HttpResponse; +import org.apache.http.ProtocolVersion; +import org.apache.http.RequestLine; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.message.BasicHeader; +import org.apache.http.message.BasicHttpResponse; +import org.apache.http.message.BasicRequestLine; +import org.apache.http.message.BasicStatusLine; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.main.MainRequest; +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.elasticsearch.client.ESRestHighLevelClientTestCase.execute; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyMapOf; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.anyVararg; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; + +/** + * Tests and demonstrates how {@link RestHighLevelClient} can be extended to support custom endpoints. + */ +public class CustomRestHighLevelClientTests extends ESTestCase { + + private static final String ENDPOINT = "/_custom"; + + private CustomRestClient restHighLevelClient; + + @Before + @SuppressWarnings("unchecked") + public void initClients() throws IOException { + if (restHighLevelClient == null) { + final RestClient restClient = mock(RestClient.class); + restHighLevelClient = new CustomRestClient(restClient); + + doAnswer(mock -> mockPerformRequest((Header) mock.getArguments()[4])) + .when(restClient) + .performRequest(eq(HttpGet.METHOD_NAME), eq(ENDPOINT), anyMapOf(String.class, String.class), anyObject(), anyVararg()); + + doAnswer(mock -> mockPerformRequestAsync((Header) mock.getArguments()[5], (ResponseListener) mock.getArguments()[4])) + .when(restClient) + .performRequestAsync(eq(HttpGet.METHOD_NAME), eq(ENDPOINT), anyMapOf(String.class, String.class), + any(HttpEntity.class), any(ResponseListener.class), anyVararg()); + } + } + + public void testCustomEndpoint() throws IOException { + final MainRequest request = new MainRequest(); + final Header header = new BasicHeader("node_name", randomAlphaOfLengthBetween(1, 10)); + + MainResponse response = execute(request, restHighLevelClient::custom, restHighLevelClient::customAsync, header); + assertEquals(header.getValue(), response.getNodeName()); + + response = execute(request, restHighLevelClient::customAndParse, restHighLevelClient::customAndParseAsync, header); + assertEquals(header.getValue(), response.getNodeName()); + } + + /** + * The {@link RestHighLevelClient} must declare the following execution methods using the protected modifier + * so that they can be used by subclasses to implement custom logic. 
+ */ + @SuppressForbidden(reason = "We're forced to use Class#getDeclaredMethods() here because this test checks protected methods") + public void testMethodsVisibility() throws ClassNotFoundException { + String[] methodNames = new String[]{"performRequest", "performRequestAndParseEntity", "performRequestAsync", + "performRequestAsyncAndParseEntity"}; + for (String methodName : methodNames) { + boolean found = false; + for (Method method : RestHighLevelClient.class.getDeclaredMethods()) { + if (method.getName().equals(methodName)) { + assertTrue("Method " + methodName + " must be protected", Modifier.isProtected(method.getModifiers())); + found = true; + } + } + assertTrue("Failed to find method " + methodName, found); + } + } + + /** + * Mocks the asynchronous request execution by calling the {@link #mockPerformRequest(Header)} method. + */ + private Void mockPerformRequestAsync(Header httpHeader, ResponseListener responseListener) { + try { + responseListener.onSuccess(mockPerformRequest(httpHeader)); + } catch (IOException e) { + responseListener.onFailure(e); + } + return null; + } + + /** + * Mocks the synchronous request execution as if it were executed by Elasticsearch. + */ + private Response mockPerformRequest(Header httpHeader) throws IOException { + ProtocolVersion protocol = new ProtocolVersion("HTTP", 1, 1); + HttpResponse httpResponse = new BasicHttpResponse(new BasicStatusLine(protocol, 200, "OK")); + + MainResponse response = new MainResponse(httpHeader.getValue(), Version.CURRENT, ClusterName.DEFAULT, "_na", Build.CURRENT, true); + BytesRef bytesRef = XContentHelper.toXContent(response, XContentType.JSON, false).toBytesRef(); + httpResponse.setEntity(new ByteArrayEntity(bytesRef.bytes, ContentType.APPLICATION_JSON)); + + RequestLine requestLine = new BasicRequestLine(HttpGet.METHOD_NAME, ENDPOINT, protocol); + return new Response(requestLine, new HttpHost("localhost", 9200), httpResponse); + } + + /** + * A custom high level client that provides custom methods to execute a request and get its associated response back. + */ + static class CustomRestClient extends RestHighLevelClient { + + private CustomRestClient(RestClient restClient) { + super(restClient); + } + + MainResponse custom(MainRequest mainRequest, Header... headers) throws IOException { + return performRequest(mainRequest, this::toRequest, this::toResponse, emptySet(), headers); + } + + MainResponse customAndParse(MainRequest mainRequest, Header... headers) throws IOException { + return performRequestAndParseEntity(mainRequest, this::toRequest, MainResponse::fromXContent, emptySet(), headers); + } + + void customAsync(MainRequest mainRequest, ActionListener<MainResponse> listener, Header... headers) { + performRequestAsync(mainRequest, this::toRequest, this::toResponse, listener, emptySet(), headers); + } + + void customAndParseAsync(MainRequest mainRequest, ActionListener<MainResponse> listener, Header... 
headers) { + performRequestAsyncAndParseEntity(mainRequest, this::toRequest, MainResponse::fromXContent, listener, emptySet(), headers); + } + + Request toRequest(MainRequest mainRequest) throws IOException { + return new Request(HttpGet.METHOD_NAME, ENDPOINT, emptyMap(), null); + } + + MainResponse toResponse(Response response) throws IOException { + return parseEntity(response.getEntity(), MainResponse::fromXContent); + } + } +} \ No newline at end of file diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 5667053d914b6..328f2ee32f557 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -174,7 +174,6 @@ public void testSearchWithRangeAgg() throws IOException { assertSearchHeader(searchResponse); assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); - assertThat(searchResponse.getTook().nanos(), greaterThan(0L)); assertEquals(5, searchResponse.getHits().totalHits); assertEquals(0, searchResponse.getHits().getHits().length); assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); @@ -257,7 +256,6 @@ public void testSearchWithMatrixStats() throws IOException { assertSearchHeader(searchResponse); assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); - assertThat(searchResponse.getTook().nanos(), greaterThan(0L)); assertEquals(5, searchResponse.getHits().totalHits); assertEquals(0, searchResponse.getHits().getHits().length); assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); @@ -337,7 +335,6 @@ public void testSearchWithParentJoin() throws IOException { assertSearchHeader(searchResponse); assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); - assertThat(searchResponse.getTook().nanos(), greaterThan(0L)); assertEquals(3, searchResponse.getHits().totalHits); assertEquals(0, searchResponse.getHits().getHits().length); assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f); diff --git a/core/build.gradle b/core/build.gradle index 230fb5a731913..2e2a7fc2fde57 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -81,7 +81,7 @@ dependencies { compile "com.vividsolutions:jts:${versions.jts}", optional // logging - compile "org.apache.logging.log4j:log4j-api:${versions.log4j}", optional + compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" compile "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional // to bridge dependencies that are still on Log4j 1 to Log4j 2 compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}", optional diff --git a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index dbad7e0bf721b..cd5da674b8e71 100644 --- a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -296,27 +296,6 @@ public int hashCode() { return Objects.hash(classHash(), Arrays.hashCode(equalsTerms())); } - public static BlendedTermQuery booleanBlendedQuery(Term[] terms) { - return booleanBlendedQuery(terms, null); - } - - public static BlendedTermQuery booleanBlendedQuery(Term[] terms, final float[] boosts) { - return new BlendedTermQuery(terms, boosts) { - 
@Override - protected Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc) { - BooleanQuery.Builder booleanQueryBuilder = new BooleanQuery.Builder(); - for (int i = 0; i < terms.length; i++) { - Query query = new TermQuery(terms[i], ctx[i]); - if (boosts != null && boosts[i] != 1f) { - query = new BoostQuery(query, boosts[i]); - } - booleanQueryBuilder.add(query, BooleanClause.Occur.SHOULD); - } - return booleanQueryBuilder.build(); - } - }; - } - public static BlendedTermQuery commonTermsBlendedQuery(Term[] terms, final float[] boosts, final float maxTermFrequency) { return new BlendedTermQuery(terms, boosts) { @Override diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index 947c7cf3ccd0a..07f646a89d1cc 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -26,6 +26,7 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; @@ -155,31 +156,20 @@ public Query getFieldQuery(String field, String queryText, boolean quoted) throw // if there is no match in the mappings. return new MatchNoDocsQuery("empty fields"); } - if (settings.useDisMax()) { - List queries = new ArrayList<>(); - boolean added = false; - for (String mField : fields) { - Query q = getFieldQuerySingle(mField, queryText, quoted); - if (q != null) { - added = true; - queries.add(applyBoost(mField, q)); - } - } - if (!added) { - return null; - } - return new DisjunctionMaxQuery(queries, settings.tieBreaker()); - } else { - List clauses = new ArrayList<>(); - for (String mField : fields) { - Query q = getFieldQuerySingle(mField, queryText, quoted); - if (q != null) { - clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD)); - } + float tiebreaker = settings.useDisMax() ? 
settings.tieBreaker() : 1.0f; + List queries = new ArrayList<>(); + boolean added = false; + for (String mField : fields) { + Query q = getFieldQuerySingle(mField, queryText, quoted); + if (q != null) { + added = true; + queries.add(applyBoost(mField, q)); } - if (clauses.isEmpty()) return null; // happens for stopwords - return getBooleanQuery(clauses); } + if (!added) { + return null; + } + return new DisjunctionMaxQuery(queries, tiebreaker); } else { return getFieldQuerySingle(field, queryText, quoted); } @@ -255,33 +245,21 @@ private Query getFieldQuerySingle(String field, String queryText, boolean quoted protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException { Collection fields = extractMultiFields(field); if (fields != null) { - if (settings.useDisMax()) { - List queries = new ArrayList<>(); - boolean added = false; - for (String mField : fields) { - Query q = super.getFieldQuery(mField, queryText, slop); - if (q != null) { - added = true; - q = applySlop(q, slop); - queries.add(applyBoost(mField, q)); - } - } - if (!added) { - return null; - } - return new DisjunctionMaxQuery(queries, settings.tieBreaker()); - } else { - List clauses = new ArrayList<>(); - for (String mField : fields) { - Query q = super.getFieldQuery(mField, queryText, slop); - if (q != null) { - q = applySlop(q, slop); - clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD)); - } + float tiebreaker = settings.useDisMax() ? settings.tieBreaker() : 1.0f; + List queries = new ArrayList<>(); + boolean added = false; + for (String mField : fields) { + Query q = super.getFieldQuery(mField, queryText, slop); + if (q != null) { + added = true; + q = applySlop(q, slop); + queries.add(applyBoost(mField, q)); } - if (clauses.isEmpty()) return null; // happens for stopwords - return getBooleanQuery(clauses); } + if (!added) { + return null; + } + return new DisjunctionMaxQuery(queries, tiebreaker); } else { return super.getFieldQuery(field, queryText, slop); } @@ -308,31 +286,20 @@ protected Query getRangeQuery(String field, String part1, String part2, return getRangeQuerySingle(fields.iterator().next(), part1, part2, startInclusive, endInclusive, context); } - if (settings.useDisMax()) { - List queries = new ArrayList<>(); - boolean added = false; - for (String mField : fields) { - Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive, context); - if (q != null) { - added = true; - queries.add(applyBoost(mField, q)); - } + float tiebreaker = settings.useDisMax() ? 
settings.tieBreaker() : 1.0f; + List queries = new ArrayList<>(); + boolean added = false; + for (String mField : fields) { + Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive, context); + if (q != null) { + added = true; + queries.add(applyBoost(mField, q)); } - if (!added) { - return null; - } - return new DisjunctionMaxQuery(queries, settings.tieBreaker()); - } else { - List clauses = new ArrayList<>(); - for (String mField : fields) { - Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive, context); - if (q != null) { - clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD)); - } - } - if (clauses.isEmpty()) return null; // happens for stopwords - return getBooleanQuery(clauses); } + if (!added) { + return null; + } + return new DisjunctionMaxQuery(queries, tiebreaker); } private Query getRangeQuerySingle(String field, String part1, String part2, @@ -367,30 +334,20 @@ protected Query getFuzzyQuery(String field, String termStr, String minSimilarity if (fields.size() == 1) { return getFuzzyQuerySingle(fields.iterator().next(), termStr, minSimilarity); } - if (settings.useDisMax()) { - List queries = new ArrayList<>(); - boolean added = false; - for (String mField : fields) { - Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity); - if (q != null) { - added = true; - queries.add(applyBoost(mField, q)); - } - } - if (!added) { - return null; - } - return new DisjunctionMaxQuery(queries, settings.tieBreaker()); - } else { - List clauses = new ArrayList<>(); - for (String mField : fields) { - Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity); - if (q != null) { - clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD)); - } + float tiebreaker = settings.useDisMax() ? settings.tieBreaker() : 1.0f; + List queries = new ArrayList<>(); + boolean added = false; + for (String mField : fields) { + Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity); + if (q != null) { + added = true; + queries.add(applyBoost(mField, q)); } - return getBooleanQuery(clauses); } + if (!added) { + return null; + } + return new DisjunctionMaxQuery(queries, tiebreaker); } else { return getFuzzyQuerySingle(field, termStr, minSimilarity); } @@ -430,31 +387,20 @@ protected Query getPrefixQuery(String field, String termStr) throws ParseExcepti if (fields.size() == 1) { return getPrefixQuerySingle(fields.iterator().next(), termStr); } - if (settings.useDisMax()) { - List queries = new ArrayList<>(); - boolean added = false; - for (String mField : fields) { - Query q = getPrefixQuerySingle(mField, termStr); - if (q != null) { - added = true; - queries.add(applyBoost(mField, q)); - } - } - if (!added) { - return null; - } - return new DisjunctionMaxQuery(queries, settings.tieBreaker()); - } else { - List clauses = new ArrayList<>(); - for (String mField : fields) { - Query q = getPrefixQuerySingle(mField, termStr); - if (q != null) { - clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD)); - } + float tiebreaker = settings.useDisMax() ? 
settings.tieBreaker() : 1.0f; + List queries = new ArrayList<>(); + boolean added = false; + for (String mField : fields) { + Query q = getPrefixQuerySingle(mField, termStr); + if (q != null) { + added = true; + queries.add(applyBoost(mField, q)); } - if (clauses.isEmpty()) return null; // happens for stopwords - return getBooleanQuery(clauses); } + if (!added) { + return null; + } + return new DisjunctionMaxQuery(queries, tiebreaker); } else { return getPrefixQuerySingle(field, termStr); } @@ -592,31 +538,20 @@ protected Query getWildcardQuery(String field, String termStr) throws ParseExcep if (fields.size() == 1) { return getWildcardQuerySingle(fields.iterator().next(), termStr); } - if (settings.useDisMax()) { - List queries = new ArrayList<>(); - boolean added = false; - for (String mField : fields) { - Query q = getWildcardQuerySingle(mField, termStr); - if (q != null) { - added = true; - queries.add(applyBoost(mField, q)); - } - } - if (!added) { - return null; - } - return new DisjunctionMaxQuery(queries, settings.tieBreaker()); - } else { - List clauses = new ArrayList<>(); - for (String mField : fields) { - Query q = getWildcardQuerySingle(mField, termStr); - if (q != null) { - clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD)); - } + float tiebreaker = settings.useDisMax() ? settings.tieBreaker() : 1.0f; + List queries = new ArrayList<>(); + boolean added = false; + for (String mField : fields) { + Query q = getWildcardQuerySingle(mField, termStr); + if (q != null) { + added = true; + queries.add(applyBoost(mField, q)); } - if (clauses.isEmpty()) return null; // happens for stopwords - return getBooleanQuery(clauses); } + if (!added) { + return null; + } + return new DisjunctionMaxQuery(queries, tiebreaker); } else { return getWildcardQuerySingle(field, termStr); } @@ -656,31 +591,20 @@ protected Query getRegexpQuery(String field, String termStr) throws ParseExcepti if (fields.size() == 1) { return getRegexpQuerySingle(fields.iterator().next(), termStr); } - if (settings.useDisMax()) { - List queries = new ArrayList<>(); - boolean added = false; - for (String mField : fields) { - Query q = getRegexpQuerySingle(mField, termStr); - if (q != null) { - added = true; - queries.add(applyBoost(mField, q)); - } - } - if (!added) { - return null; - } - return new DisjunctionMaxQuery(queries, settings.tieBreaker()); - } else { - List clauses = new ArrayList<>(); - for (String mField : fields) { - Query q = getRegexpQuerySingle(mField, termStr); - if (q != null) { - clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD)); - } + float tiebreaker = settings.useDisMax() ? 
settings.tieBreaker() : 1.0f; + List queries = new ArrayList<>(); + boolean added = false; + for (String mField : fields) { + Query q = getRegexpQuerySingle(mField, termStr); + if (q != null) { + added = true; + queries.add(applyBoost(mField, q)); } - if (clauses.isEmpty()) return null; // happens for stopwords - return getBooleanQuery(clauses); } + if (!added) { + return null; + } + return new DisjunctionMaxQuery(queries, tiebreaker); } else { return getRegexpQuerySingle(field, termStr); } diff --git a/core/src/main/java/org/apache/lucene/search/grouping/CollapsingDocValuesSource.java b/core/src/main/java/org/apache/lucene/search/grouping/CollapsingDocValuesSource.java index e48773389021c..cbcd1e3a4117d 100644 --- a/core/src/main/java/org/apache/lucene/search/grouping/CollapsingDocValuesSource.java +++ b/core/src/main/java/org/apache/lucene/search/grouping/CollapsingDocValuesSource.java @@ -40,7 +40,7 @@ abstract class CollapsingDocValuesSource extends GroupSelector { protected final String field; - CollapsingDocValuesSource(String field) throws IOException { + CollapsingDocValuesSource(String field) { this.field = field; } @@ -58,7 +58,7 @@ static class Numeric extends CollapsingDocValuesSource { private long value; private boolean hasValue; - Numeric(String field) throws IOException { + Numeric(String field) { super(field); } @@ -148,7 +148,7 @@ static class Keyword extends CollapsingDocValuesSource { private SortedDocValues values; private int ord; - Keyword(String field) throws IOException { + Keyword(String field) { super(field); } diff --git a/core/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java b/core/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java index b5cb02bcd6536..fedda3ead596b 100644 --- a/core/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java +++ b/core/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java @@ -46,7 +46,7 @@ public final class CollapsingTopDocsCollector extends FirstPassGroupingCollec private final boolean trackMaxScore; CollapsingTopDocsCollector(GroupSelector groupSelector, String collapseField, Sort sort, - int topN, boolean trackMaxScore) throws IOException { + int topN, boolean trackMaxScore) { super(groupSelector, sort, topN); this.collapseField = collapseField; this.trackMaxScore = trackMaxScore; @@ -60,7 +60,7 @@ public final class CollapsingTopDocsCollector extends FirstPassGroupingCollec /** * Transform {@link FirstPassGroupingCollector#getTopGroups(int, boolean)} output in - * {@link CollapseTopFieldDocs}. The collapsing needs only one pass so we can create the final top docs at the end + * {@link CollapseTopFieldDocs}. The collapsing needs only one pass so we can get the final top docs at the end * of the first pass. */ public CollapseTopFieldDocs getTopDocs() throws IOException { @@ -132,10 +132,9 @@ public void collect(int doc) throws IOException { * This must be non-null, ie, if you want to groupSort by relevance * use Sort.RELEVANCE. * @param topN How many top groups to keep. 
- * @throws IOException When I/O related errors occur */ public static CollapsingTopDocsCollector createNumeric(String collapseField, Sort sort, - int topN, boolean trackMaxScore) throws IOException { + int topN, boolean trackMaxScore) { return new CollapsingTopDocsCollector<>(new CollapsingDocValuesSource.Numeric(collapseField), collapseField, sort, topN, trackMaxScore); } @@ -152,12 +151,10 @@ public static CollapsingTopDocsCollector createNumeric(String collapseField, * document per collapsed key. * This must be non-null, ie, if you want to groupSort by relevance use Sort.RELEVANCE. * @param topN How many top groups to keep. - * @throws IOException When I/O related errors occur */ public static CollapsingTopDocsCollector createKeyword(String collapseField, Sort sort, - int topN, boolean trackMaxScore) throws IOException { + int topN, boolean trackMaxScore) { return new CollapsingTopDocsCollector<>(new CollapsingDocValuesSource.Keyword(collapseField), collapseField, sort, topN, trackMaxScore); } } - diff --git a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java b/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java deleted file mode 100644 index a33bf16dee4c7..0000000000000 --- a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.lucene.search.postingshighlight; - -import org.apache.lucene.search.highlight.Snippet; -import org.apache.lucene.search.highlight.Encoder; -import org.elasticsearch.search.fetch.subphase.highlight.HighlightUtils; - -/** -Custom passage formatter that allows us to: -1) extract different snippets (instead of a single big string) together with their scores ({@link Snippet}) -2) use the {@link Encoder} implementations that are already used with the other highlighters - */ -public class CustomPassageFormatter extends PassageFormatter { - - private final String preTag; - private final String postTag; - private final Encoder encoder; - - public CustomPassageFormatter(String preTag, String postTag, Encoder encoder) { - this.preTag = preTag; - this.postTag = postTag; - this.encoder = encoder; - } - - @Override - public Snippet[] format(Passage[] passages, String content) { - Snippet[] snippets = new Snippet[passages.length]; - int pos; - for (int j = 0; j < passages.length; j++) { - Passage passage = passages[j]; - StringBuilder sb = new StringBuilder(); - pos = passage.getStartOffset(); - for (int i = 0; i < passage.getNumMatches(); i++) { - int start = passage.getMatchStarts()[i]; - int end = passage.getMatchEnds()[i]; - // its possible to have overlapping terms - if (start > pos) { - append(sb, content, pos, start); - } - if (end > pos) { - sb.append(preTag); - append(sb, content, Math.max(pos, start), end); - sb.append(postTag); - pos = end; - } - } - // its possible a "term" from the analyzer could span a sentence boundary. - append(sb, content, pos, Math.max(pos, passage.getEndOffset())); - //we remove the paragraph separator if present at the end of the snippet (we used it as separator between values) - if (sb.charAt(sb.length() - 1) == HighlightUtils.PARAGRAPH_SEPARATOR) { - sb.deleteCharAt(sb.length() - 1); - } else if (sb.charAt(sb.length() - 1) == HighlightUtils.NULL_SEPARATOR) { - sb.deleteCharAt(sb.length() - 1); - } - //and we trim the snippets too - snippets[j] = new Snippet(sb.toString().trim(), passage.getScore(), passage.getNumMatches() > 0); - } - return snippets; - } - - protected void append(StringBuilder dest, String content, int start, int end) { - dest.append(encoder.encodeText(content.substring(start, end))); - } -} diff --git a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java b/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java deleted file mode 100644 index ac90a3e57aee7..0000000000000 --- a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.lucene.search.postingshighlight; - -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.highlight.Snippet; - -import java.io.IOException; -import java.text.BreakIterator; -import java.util.Map; - -/** - * Subclass of the {@link PostingsHighlighter} that works for a single field in a single document. - * Uses a custom {@link PassageFormatter}. Accepts field content as a constructor argument, given that loading - * is custom and can be done reading from _source field. Supports using different {@link BreakIterator} to break - * the text into fragments. Considers every distinct field value as a discrete passage for highlighting (unless - * the whole content needs to be highlighted). Supports both returning empty snippets and non highlighted snippets - * when no highlighting can be performed. - * - * The use that we make of the postings highlighter is not optimal. It would be much better to highlight - * multiple docs in a single call, as we actually lose its sequential IO. That would require to - * refactor the elasticsearch highlight api which currently works per hit. - */ -public final class CustomPostingsHighlighter extends PostingsHighlighter { - - private static final Snippet[] EMPTY_SNIPPET = new Snippet[0]; - private static final Passage[] EMPTY_PASSAGE = new Passage[0]; - - private final Analyzer analyzer; - private final CustomPassageFormatter passageFormatter; - private final BreakIterator breakIterator; - private final boolean returnNonHighlightedSnippets; - private final String fieldValue; - - /** - * Creates a new instance of {@link CustomPostingsHighlighter} - * - * @param analyzer the analyzer used for the field at index time, used for multi term queries internally - * @param passageFormatter our own {@link PassageFormatter} which generates snippets in forms of {@link Snippet} objects - * @param fieldValue the original field values as constructor argument, loaded from te _source field or the relevant stored field. - * @param returnNonHighlightedSnippets whether non highlighted snippets should be returned rather than empty snippets when - * no highlighting can be performed - */ - public CustomPostingsHighlighter(Analyzer analyzer, CustomPassageFormatter passageFormatter, String fieldValue, boolean returnNonHighlightedSnippets) { - this(analyzer, passageFormatter, null, fieldValue, returnNonHighlightedSnippets); - } - - /** - * Creates a new instance of {@link CustomPostingsHighlighter} - * - * @param analyzer the analyzer used for the field at index time, used for multi term queries internally - * @param passageFormatter our own {@link PassageFormatter} which generates snippets in forms of {@link Snippet} objects - * @param breakIterator an instance {@link BreakIterator} selected depending on the highlighting options - * @param fieldValue the original field values as constructor argument, loaded from te _source field or the relevant stored field. 
- * @param returnNonHighlightedSnippets whether non highlighted snippets should be returned rather than empty snippets when - * no highlighting can be performed - */ - public CustomPostingsHighlighter(Analyzer analyzer, CustomPassageFormatter passageFormatter, BreakIterator breakIterator, String fieldValue, boolean returnNonHighlightedSnippets) { - this.analyzer = analyzer; - this.passageFormatter = passageFormatter; - this.breakIterator = breakIterator; - this.returnNonHighlightedSnippets = returnNonHighlightedSnippets; - this.fieldValue = fieldValue; - } - - /** - * Highlights terms extracted from the provided query within the content of the provided field name - */ - public Snippet[] highlightField(String field, Query query, IndexSearcher searcher, int docId, int maxPassages) throws IOException { - Map fieldsAsObjects = super.highlightFieldsAsObjects(new String[]{field}, query, searcher, new int[]{docId}, new int[]{maxPassages}); - Object[] snippetObjects = fieldsAsObjects.get(field); - if (snippetObjects != null) { - //one single document at a time - assert snippetObjects.length == 1; - Object snippetObject = snippetObjects[0]; - if (snippetObject != null && snippetObject instanceof Snippet[]) { - return (Snippet[]) snippetObject; - } - } - return EMPTY_SNIPPET; - } - - @Override - protected PassageFormatter getFormatter(String field) { - return passageFormatter; - } - - @Override - protected BreakIterator getBreakIterator(String field) { - if (breakIterator == null) { - return super.getBreakIterator(field); - } - return breakIterator; - } - - /* - By default the postings highlighter returns non highlighted snippet when there are no matches. - We want to return no snippets by default, unless no_match_size is greater than 0 - */ - @Override - protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) { - if (returnNonHighlightedSnippets) { - //we want to return the first sentence of the first snippet only - return super.getEmptyHighlight(fieldName, bi, 1); - } - return EMPTY_PASSAGE; - } - - @Override - protected Analyzer getIndexAnalyzer(String field) { - return analyzer; - } - - @Override - protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException { - //we only highlight one field, one document at a time - return new String[][]{new String[]{fieldValue}}; - } -} diff --git a/core/src/main/java/org/apache/lucene/search/uhighlight/CustomPassageFormatter.java b/core/src/main/java/org/apache/lucene/search/uhighlight/CustomPassageFormatter.java index 7a34a805db623..52eee559c6888 100644 --- a/core/src/main/java/org/apache/lucene/search/uhighlight/CustomPassageFormatter.java +++ b/core/src/main/java/org/apache/lucene/search/uhighlight/CustomPassageFormatter.java @@ -20,7 +20,6 @@ package org.apache.lucene.search.uhighlight; import org.apache.lucene.search.highlight.Encoder; -import org.apache.lucene.search.highlight.Snippet; import org.elasticsearch.search.fetch.subphase.highlight.HighlightUtils; /** diff --git a/core/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java b/core/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java index b6d6f1d1a4dae..ebc13298202a6 100644 --- a/core/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java +++ b/core/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.PrefixQuery; import 
org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.highlight.Snippet; import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.search.spans.SpanNearQuery; import org.apache.lucene.search.spans.SpanOrQuery; diff --git a/core/src/main/java/org/apache/lucene/search/highlight/Snippet.java b/core/src/main/java/org/apache/lucene/search/uhighlight/Snippet.java similarity index 90% rename from core/src/main/java/org/apache/lucene/search/highlight/Snippet.java rename to core/src/main/java/org/apache/lucene/search/uhighlight/Snippet.java index 81a3d406ea346..b7490c55feffa 100644 --- a/core/src/main/java/org/apache/lucene/search/highlight/Snippet.java +++ b/core/src/main/java/org/apache/lucene/search/uhighlight/Snippet.java @@ -17,11 +17,11 @@ * under the License. */ -package org.apache.lucene.search.highlight; +package org.apache.lucene.search.uhighlight; /** * Represents a scored highlighted snippet. - * It's our own arbitrary object that we get back from the postings highlighter when highlighting a document. + * It's our own arbitrary object that we get back from the unified highlighter when highlighting a document. * Every snippet contains its formatted text and its score. * The score is needed in case we want to sort snippets by score, they get sorted by position in the text by default. */ diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java index ae006045e3d47..7c20ed7d2c482 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -829,8 +829,7 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.transport.SendRequestTransportException::new, 58, UNKNOWN_VERSION_ADDED), ES_REJECTED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class, org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59, UNKNOWN_VERSION_ADDED), - EARLY_TERMINATION_EXCEPTION(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class, - org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60, UNKNOWN_VERSION_ADDED), + // 60 used to be for EarlyTerminationException // 61 used to be for RoutingValidationException NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class, org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62, UNKNOWN_VERSION_ADDED), diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java index 2dc0ed870c025..d543ac67e1d91 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java @@ -81,7 +81,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_5_3_0)) { source.writeTo(out); } else { - out.writeString(source.getCode()); + out.writeString(source.getSource()); } } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java index 35dd53276cd6d..7d948e7137ebf 100644 
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.create; +import org.elasticsearch.Version; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -32,14 +33,16 @@ public class CreateIndexResponse extends AcknowledgedResponse { private boolean shardsAcked; + private String index; protected CreateIndexResponse() { } - protected CreateIndexResponse(boolean acknowledged, boolean shardsAcked) { + protected CreateIndexResponse(boolean acknowledged, boolean shardsAcked, String index) { super(acknowledged); assert acknowledged || shardsAcked == false; // if its not acknowledged, then shards acked should be false too this.shardsAcked = shardsAcked; + this.index = index; } @Override @@ -47,6 +50,9 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); readAcknowledged(in); shardsAcked = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_5_6_0)) { + index = in.readString(); + } } @Override @@ -54,6 +60,9 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); out.writeBoolean(shardsAcked); + if (out.getVersion().onOrAfter(Version.V_5_6_0)) { + out.writeString(index); + } } /** @@ -65,7 +74,12 @@ public boolean isShardsAcked() { return shardsAcked; } + public String index() { + return index; + } + public void addCustomFields(XContentBuilder builder) throws IOException { builder.field("shards_acknowledged", isShardsAcked()); + builder.field("index", index()); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 354dcf2387345..0ac8d02f97760 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -79,7 +79,7 @@ protected void masterOperation(final CreateIndexRequest request, final ClusterSt .waitForActiveShards(request.waitForActiveShards()); createIndexService.createIndex(updateRequest, ActionListener.wrap(response -> - listener.onResponse(new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcked())), + listener.onResponse(new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcked(), indexName)), listener::onFailure)); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkResponse.java index e7ad0afe3aa17..0c5149f6bf353 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkResponse.java @@ -25,7 +25,7 @@ public final class ShrinkResponse extends CreateIndexResponse { ShrinkResponse() { } - ShrinkResponse(boolean acknowledged, boolean shardsAcked) { - super(acknowledged, shardsAcked); + ShrinkResponse(boolean acknowledged, boolean shardsAcked, String index) { + super(acknowledged, shardsAcked, index); } } diff --git 
a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java index 8c482eac10cfc..2555299709cda 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java @@ -91,8 +91,13 @@ public void onResponse(IndicesStatsResponse indicesStatsResponse) { IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i); return shard == null ? null : shard.getPrimary().getDocs(); }, indexNameExpressionResolver); - createIndexService.createIndex(updateRequest, ActionListener.wrap(response -> - listener.onResponse(new ShrinkResponse(response.isAcknowledged(), response.isShardsAcked())), listener::onFailure)); + createIndexService.createIndex( + updateRequest, + ActionListener.wrap(response -> + listener.onResponse(new ShrinkResponse(response.isAcknowledged(), response.isShardsAcked(), updateRequest.index())), + listener::onFailure + ) + ); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 597b27eae4bd2..7a2c5eb02222a 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -477,7 +477,7 @@ public static Translog.Location performOnReplica(BulkShardRequest request, Index case FAILURE: final BulkItemResponse.Failure failure = item.getPrimaryResponse().getFailure(); assert failure.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO : "seq no must be assigned"; - operationResult = executeFailureNoOpOnReplica(failure, replica); + operationResult = executeFailureNoOpOnReplica(failure, primaryTerm, replica); assert operationResult != null : "operation result must never be null when primary response has no failure"; location = syncOperationResultOrThrow(operationResult, location); break; @@ -673,9 +673,10 @@ private static Engine.DeleteResult executeDeleteRequestOnReplica(DocWriteRespons return replica.delete(delete); } - private static Engine.NoOpResult executeFailureNoOpOnReplica(BulkItemResponse.Failure primaryFailure, IndexShard replica) throws IOException { - final Engine.NoOp noOp = replica.prepareMarkingSeqNoAsNoOp( - primaryFailure.getSeqNo(), primaryFailure.getMessage()); + private static Engine.NoOpResult executeFailureNoOpOnReplica(BulkItemResponse.Failure primaryFailure, long primaryTerm, + IndexShard replica) throws IOException { + final Engine.NoOp noOp = replica.prepareMarkingSeqNoAsNoOpOnReplica( + primaryFailure.getSeqNo(), primaryTerm, primaryFailure.getMessage()); return replica.markSeqNoAsNoOp(noOp); } diff --git a/core/src/main/java/org/elasticsearch/action/search/ClearScrollController.java b/core/src/main/java/org/elasticsearch/action/search/ClearScrollController.java new file mode 100644 index 0000000000000..d94fe1a2bbe6b --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/search/ClearScrollController.java @@ -0,0 +1,141 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.search; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportResponse; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId; + +final class ClearScrollController implements Runnable { + private final DiscoveryNodes nodes; + private final SearchTransportService searchTransportService; + private final CountDown expectedOps; + private final ActionListener listener; + private final AtomicBoolean hasFailed = new AtomicBoolean(false); + private final AtomicInteger freedSearchContexts = new AtomicInteger(0); + private final Logger logger; + private final Runnable runner; + + ClearScrollController(ClearScrollRequest request, ActionListener listener, DiscoveryNodes nodes, Logger logger, + SearchTransportService searchTransportService) { + this.nodes = nodes; + this.logger = logger; + this.searchTransportService = searchTransportService; + this.listener = listener; + List scrollIds = request.getScrollIds(); + final int expectedOps; + if (scrollIds.size() == 1 && "_all".equals(scrollIds.get(0))) { + expectedOps = nodes.getSize(); + runner = this::cleanAllScrolls; + } else { + List parsedScrollIds = new ArrayList<>(); + for (String parsedScrollId : request.getScrollIds()) { + ScrollIdForNode[] context = parseScrollId(parsedScrollId).getContext(); + for (ScrollIdForNode id : context) { + parsedScrollIds.add(id); + } + } + if (parsedScrollIds.isEmpty()) { + expectedOps = 0; + runner = () -> listener.onResponse(new ClearScrollResponse(true, 0)); + } else { + expectedOps = parsedScrollIds.size(); + runner = () -> cleanScrollIds(parsedScrollIds); + } + } + this.expectedOps = new CountDown(expectedOps); + + } + + @Override + public void run() { + runner.run(); + } + + void cleanAllScrolls() { + for (final DiscoveryNode node : nodes) { + try { + Transport.Connection connection = searchTransportService.getConnection(null, node); + searchTransportService.sendClearAllScrollContexts(connection, new ActionListener() { + @Override + public void onResponse(TransportResponse response) { + onFreedContext(true); + } + + @Override + public void onFailure(Exception e) { + onFailedFreedContext(e, node); + } + }); + } catch (Exception e) { + onFailedFreedContext(e, node); + } + } + } + + void cleanScrollIds(List parsedScrollIds) { + for (ScrollIdForNode target : parsedScrollIds) { + final DiscoveryNode node = 
nodes.get(target.getNode()); + if (node == null) { + onFreedContext(false); + } else { + try { + Transport.Connection connection = searchTransportService.getConnection(null, node); + searchTransportService.sendFreeContext(connection, target.getScrollId(), + ActionListener.wrap(freed -> onFreedContext(freed.isFreed()), + e -> onFailedFreedContext(e, node))); + } catch (Exception e) { + onFailedFreedContext(e, node); + } + } + } + } + + private void onFreedContext(boolean freed) { + if (freed) { + freedSearchContexts.incrementAndGet(); + } + if (expectedOps.countDown()) { + boolean succeeded = hasFailed.get() == false; + listener.onResponse(new ClearScrollResponse(succeeded, freedSearchContexts.get())); + } + } + + private void onFailedFreedContext(Throwable e, DiscoveryNode node) { + logger.warn((Supplier) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e); + if (expectedOps.countDown()) { + listener.onResponse(new ClearScrollResponse(false, freedSearchContexts.get())); + } else { + hasFailed.set(true); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index e1e0205e7e518..879607d059e80 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -405,9 +405,18 @@ private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFr * @param queryResults a list of non-null query shard results */ public ReducedQueryPhase reducedQueryPhase(Collection queryResults, boolean isScrollRequest) { - return reducedQueryPhase(queryResults, null, new ArrayList<>(), new TopDocsStats(), 0, isScrollRequest); + return reducedQueryPhase(queryResults, isScrollRequest, true); } + /** + * Reduces the given query results and consumes all aggregations and profile results. + * @param queryResults a list of non-null query shard results + */ + public ReducedQueryPhase reducedQueryPhase(Collection queryResults, boolean isScrollRequest, boolean trackTotalHits) { + return reducedQueryPhase(queryResults, null, new ArrayList<>(), new TopDocsStats(trackTotalHits), 0, isScrollRequest); + } + + /** * Reduces the given query results and consumes all aggregations and profile results. * @param queryResults a list of non-null query shard results @@ -711,6 +720,7 @@ InitialSearchPhase.SearchPhaseResults newSearchPhaseResults(S boolean isScrollRequest = request.scroll() != null; final boolean hasAggs = source != null && source.aggregations() != null; final boolean hasTopDocs = source == null || source.size() != 0; + final boolean trackTotalHits = source == null || source.trackTotalHits(); if (isScrollRequest == false && (hasAggs || hasTopDocs)) { // no incremental reduce if scroll is used - we only hit a single shard or sometimes more... 
@@ -722,18 +732,30 @@ InitialSearchPhase.SearchPhaseResults newSearchPhaseResults(S return new InitialSearchPhase.SearchPhaseResults(numShards) { @Override public ReducedQueryPhase reduce() { - return reducedQueryPhase(results.asList(), isScrollRequest); + return reducedQueryPhase(results.asList(), isScrollRequest, trackTotalHits); } }; } static final class TopDocsStats { + final boolean trackTotalHits; long totalHits; long fetchHits; float maxScore = Float.NEGATIVE_INFINITY; + TopDocsStats() { + this(true); + } + + TopDocsStats(boolean trackTotalHits) { + this.trackTotalHits = trackTotalHits; + this.totalHits = trackTotalHits ? 0 : -1; + } + void add(TopDocs topDocs) { - totalHits += topDocs.totalHits; + if (trackTotalHits) { + totalHits += topDocs.totalHits; + } fetchHits += topDocs.scoreDocs.length; if (!Float.isNaN(topDocs.getMaxScore())) { maxScore = Math.max(maxScore, topDocs.getMaxScore()); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 9e35cca05b94f..01a3e94620a46 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -39,6 +39,8 @@ import java.util.Collections; import java.util.Objects; +import static org.elasticsearch.action.ValidateActions.addValidationError; + /** * A request to execute search against one or more indices (or all). Best created using * {@link org.elasticsearch.client.Requests#searchRequest(String...)}. @@ -102,7 +104,12 @@ public SearchRequest(String[] indices, SearchSourceBuilder source) { @Override public ActionRequestValidationException validate() { - return null; + ActionRequestValidationException validationException = null; + if (source != null && source.trackTotalHits() == false && scroll() != null) { + validationException = + addValidationError("disabling [track_total_hits] is not allowed in a scroll context", validationException); + } + return validationException; } /** diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index ffe2c1b20c516..0333092b91755 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -363,14 +363,21 @@ public SearchRequestBuilder slice(SliceBuilder builder) { } /** - * Applies when sorting, and controls if scores will be tracked as well. Defaults to - * false. + * Applies when sorting, and controls if scores will be tracked as well. Defaults to false. */ public SearchRequestBuilder setTrackScores(boolean trackScores) { sourceBuilder().trackScores(trackScores); return this; } + /** + * Indicates if the total hit count for the query should be tracked. Defaults to true + */ + public SearchRequestBuilder setTrackTotalHits(boolean trackTotalHits) { + sourceBuilder().trackTotalHits(trackTotalHits); + return this; + } + /** * Adds stored fields to load and return (note, it must be stored) as part of the search request. * To disable the stored fields entirely (source and metadata fields) use {@code storedField("_none_")}. 
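A minimal usage sketch of the new `track_total_hits` option introduced above (not part of this diff): it assumes a connected `Client` instance and a hypothetical index name; only `setTrackTotalHits(boolean)` and the scroll validation come from this change. Because `TopDocsStats` initializes the counter to -1 when tracking is off, the response reports -1 instead of an exact total.

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;

class TrackTotalHitsExample {
    // Sketch only: `client` and "my-index" are hypothetical;
    // setTrackTotalHits(boolean) is the builder method added in this change.
    static SearchResponse searchWithoutTotalHits(Client client) {
        return client.prepareSearch("my-index")
                .setQuery(QueryBuilders.matchAllQuery())
                .setTrackTotalHits(false) // skip counting; hits.total is reported as -1
                .get();
        // Combining setTrackTotalHits(false) with setScroll(...) now fails request
        // validation: "disabling [track_total_hits] is not allowed in a scroll context"
    }
}
```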
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 90ee1dfd434c5..3aa5e3a2adbc6 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -45,8 +45,6 @@ import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken; /** @@ -245,7 +243,7 @@ public static SearchResponse fromXContent(XContentParser parser) throws IOExcept } else if (NUM_REDUCE_PHASES.match(currentFieldName)) { numReducePhases = parser.intValue(); } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } else if (token == XContentParser.Token.START_OBJECT) { if (SearchHits.Fields.HITS.equals(currentFieldName)) { @@ -268,7 +266,7 @@ public static SearchResponse fromXContent(XContentParser parser) throws IOExcept } else if (RestActions.TOTAL_FIELD.match(currentFieldName)) { totalShards = parser.intValue(); } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } else if (token == XContentParser.Token.START_ARRAY) { if (RestActions.FAILURES_FIELD.match(currentFieldName)) { @@ -276,14 +274,14 @@ public static SearchResponse fromXContent(XContentParser parser) throws IOExcept failures.add(ShardSearchFailure.fromXContent(parser)); } } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } else { - throwUnknownToken(token, parser.getTokenLocation()); + parser.skipChildren(); } } } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 9dd2125d5e2fe..2d20d383288f4 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -98,14 +98,14 @@ public void onFailure(Exception e) { }, SearchFreeContextResponse::new)); } - public void sendFreeContext(DiscoveryNode node, long contextId, final ActionListener listener) { - transportService.sendRequest(node, FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextRequest(contextId), - new ActionListenerResponseHandler<>(listener, SearchFreeContextResponse::new)); + public void sendFreeContext(Transport.Connection connection, long contextId, final ActionListener listener) { + transportService.sendRequest(connection, FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextRequest(contextId), + TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, SearchFreeContextResponse::new)); } - public void sendClearAllScrollContexts(DiscoveryNode node, final ActionListener listener) { - transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, TransportRequest.Empty.INSTANCE, - new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE)); + public void sendClearAllScrollContexts(Transport.Connection connection, final ActionListener listener) { + 
transportService.sendRequest(connection, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, TransportRequest.Empty.INSTANCE, + TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE)); } public void sendExecuteDfs(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, diff --git a/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java index 8fed61af29412..7eb939ca8274e 100644 --- a/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java +++ b/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java @@ -38,8 +38,6 @@ import java.io.IOException; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken; /** * Represents a failure to search on a specific shard. @@ -200,16 +198,16 @@ public static ShardSearchFailure fromXContent(XContentParser parser) throws IOEx } else if (NODE_FIELD.equals(currentFieldName)) { nodeId = parser.text(); } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } else if (token == XContentParser.Token.START_OBJECT) { if (REASON_FIELD.equals(currentFieldName)) { exception = ElasticsearchException.fromXContent(parser); } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } else { - throwUnknownToken(token, parser.getTokenLocation()); + parser.skipChildren(); } } return new ShardSearchFailure(exception, diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java index 716077c915d6b..d9afbdacafe3c 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java @@ -19,30 +19,16 @@ package org.elasticsearch.action.search; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId; - public class TransportClearScrollAction extends HandledTransportAction { private final ClusterService clusterService; @@ -53,105 +39,16 @@ public TransportClearScrollAction(Settings settings, 
TransportService transportS ClusterService clusterService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, SearchTransportService searchTransportService) { - super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ClearScrollRequest::new); + super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + ClearScrollRequest::new); this.clusterService = clusterService; this.searchTransportService = searchTransportService; } @Override protected void doExecute(ClearScrollRequest request, final ActionListener listener) { - new Async(request, listener, clusterService.state()).run(); - } - - private class Async { - final DiscoveryNodes nodes; - final CountDown expectedOps; - final List contexts = new ArrayList<>(); - final ActionListener listener; - final AtomicReference expHolder; - final AtomicInteger numberOfFreedSearchContexts = new AtomicInteger(0); - - private Async(ClearScrollRequest request, ActionListener listener, ClusterState clusterState) { - int expectedOps = 0; - this.nodes = clusterState.nodes(); - if (request.getScrollIds().size() == 1 && "_all".equals(request.getScrollIds().get(0))) { - expectedOps = nodes.getSize(); - } else { - for (String parsedScrollId : request.getScrollIds()) { - ScrollIdForNode[] context = parseScrollId(parsedScrollId).getContext(); - expectedOps += context.length; - this.contexts.add(context); - } - } - this.listener = listener; - this.expHolder = new AtomicReference<>(); - this.expectedOps = new CountDown(expectedOps); - } - - public void run() { - if (expectedOps.isCountedDown()) { - listener.onResponse(new ClearScrollResponse(true, 0)); - return; - } - - if (contexts.isEmpty()) { - for (final DiscoveryNode node : nodes) { - searchTransportService.sendClearAllScrollContexts(node, new ActionListener() { - @Override - public void onResponse(TransportResponse response) { - onFreedContext(true); - } - - @Override - public void onFailure(Exception e) { - onFailedFreedContext(e, node); - } - }); - } - } else { - for (ScrollIdForNode[] context : contexts) { - for (ScrollIdForNode target : context) { - final DiscoveryNode node = nodes.get(target.getNode()); - if (node == null) { - onFreedContext(false); - continue; - } - - searchTransportService.sendFreeContext(node, target.getScrollId(), new ActionListener() { - @Override - public void onResponse(SearchTransportService.SearchFreeContextResponse freed) { - onFreedContext(freed.isFreed()); - } - - @Override - public void onFailure(Exception e) { - onFailedFreedContext(e, node); - } - }); - } - } - } - } - - void onFreedContext(boolean freed) { - if (freed) { - numberOfFreedSearchContexts.incrementAndGet(); - } - if (expectedOps.countDown()) { - boolean succeeded = expHolder.get() == null; - listener.onResponse(new ClearScrollResponse(succeeded, numberOfFreedSearchContexts.get())); - } - } - - void onFailedFreedContext(Throwable e, DiscoveryNode node) { - logger.warn((Supplier) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e); - if (expectedOps.countDown()) { - listener.onResponse(new ClearScrollResponse(false, numberOfFreedSearchContexts.get())); - } else { - expHolder.set(e); - } - } - + Runnable runnable = new ClearScrollController(request, listener, clusterService.state().nodes(), logger, searchTransportService); + runnable.run(); } } diff --git a/core/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java 
b/core/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java index 85b418e046cc0..ed9b7c8d15d60 100644 --- a/core/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java +++ b/core/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java @@ -37,7 +37,7 @@ public final class GroupedActionListener implements ActionListener { private final CountDown countDown; private final AtomicInteger pos = new AtomicInteger(); - private final AtomicArray roles; + private final AtomicArray results; private final ActionListener> delegate; private final Collection defaults; private final AtomicReference failure = new AtomicReference<>(); @@ -49,7 +49,7 @@ public final class GroupedActionListener implements ActionListener { */ public GroupedActionListener(ActionListener> delegate, int groupSize, Collection defaults) { - roles = new AtomicArray<>(groupSize); + results = new AtomicArray<>(groupSize); countDown = new CountDown(groupSize); this.delegate = delegate; this.defaults = defaults; @@ -57,12 +57,12 @@ public GroupedActionListener(ActionListener> delegate, int groupSi @Override public void onResponse(T element) { - roles.set(pos.incrementAndGet() - 1, element); + results.setOnce(pos.incrementAndGet() - 1, element); if (countDown.countDown()) { if (failure.get() != null) { delegate.onFailure(failure.get()); } else { - List collect = this.roles.asList(); + List collect = this.results.asList(); collect.addAll(defaults); delegate.onResponse(Collections.unmodifiableList(collect)); } diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 8973890021f7a..543118a172f95 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -375,7 +375,7 @@ private static class ShardStartedTransportHandler implements TransportRequestHan public void messageReceived(ShardEntry request, TransportChannel channel) throws Exception { logger.debug("{} received shard started for [{}]", request.shardId, request); clusterService.submitStateUpdateTask( - "shard-started", + "shard-started " + request, request, ClusterStateTaskConfig.build(Priority.URGENT), shardStartedClusterStateTaskExecutor, diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index e47585356a01b..fcc0fdebdd4bc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -243,7 +243,8 @@ public SortedMap getAliasAndIndexLookup() { * * @param aliases The names of the index aliases to find * @param concreteIndices The concrete indexes the index aliases must point to in order to be returned.
- * @return the found index aliases grouped by index + * @return a map of index to a list of alias metadata, the list corresponding to a concrete index will be empty if no aliases are + * present for that index */ public ImmutableOpenMap> findAliases(final String[] aliases, String[] concreteIndices) { assert aliases != null; @@ -273,8 +274,8 @@ public int compare(AliasMetaData o1, AliasMetaData o2) { return o1.alias().compareTo(o2.alias()); } }); - mapBuilder.put(index, Collections.unmodifiableList(filteredValues)); } + mapBuilder.put(index, Collections.unmodifiableList(filteredValues)); } return mapBuilder.build(); } diff --git a/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index a586414631805..aed72f502bfe9 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/core/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -82,7 +82,7 @@ public static boolean isValidLatitude(double latitude) { /** Returns true if longitude is actually a valid longitude value. */ public static boolean isValidLongitude(double longitude) { - if (Double.isNaN(longitude) || Double.isNaN(longitude) || longitude < GeoUtils.MIN_LON || longitude > GeoUtils.MAX_LON) { + if (Double.isNaN(longitude) || Double.isInfinite(longitude) || longitude < GeoUtils.MIN_LON || longitude > GeoUtils.MAX_LON) { return false; } return true; diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index c213c384611f5..52550f1ba67df 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -49,6 +49,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.SimpleCollector; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; @@ -245,20 +246,6 @@ protected Object doBody(String segmentFileName) throws IOException { }.run(); } - /** - * Wraps delegate with count based early termination collector with a threshold of maxCountHits - */ - public static final EarlyTerminatingCollector wrapCountBasedEarlyTerminatingCollector(final Collector delegate, int maxCountHits) { - return new EarlyTerminatingCollector(delegate, maxCountHits); - } - - /** - * Wraps delegate with a time limited collector with a timeout of timeoutInMillis - */ - public static final TimeLimitingCollector wrapTimeLimitingCollector(final Collector delegate, final Counter counter, long timeoutInMillis) { - return new TimeLimitingCollector(delegate, counter, timeoutInMillis); - } - /** * Check whether there is one or more documents matching the provided query. 
*/ @@ -617,71 +604,6 @@ public static void writeExplanation(StreamOutput out, Explanation explanation) t } } - /** - * This exception is thrown when {@link org.elasticsearch.common.lucene.Lucene.EarlyTerminatingCollector} - * reaches early termination - * */ - public static final class EarlyTerminationException extends ElasticsearchException { - - public EarlyTerminationException(String msg) { - super(msg); - } - - public EarlyTerminationException(StreamInput in) throws IOException{ - super(in); - } - } - - /** - * A collector that terminates early by throwing {@link org.elasticsearch.common.lucene.Lucene.EarlyTerminationException} - * when count of matched documents has reached maxCountHits - */ - public static final class EarlyTerminatingCollector extends SimpleCollector { - - private final int maxCountHits; - private final Collector delegate; - - private int count = 0; - private LeafCollector leafCollector; - - EarlyTerminatingCollector(final Collector delegate, int maxCountHits) { - this.maxCountHits = maxCountHits; - this.delegate = Objects.requireNonNull(delegate); - } - - public int count() { - return count; - } - - public boolean exists() { - return count > 0; - } - - @Override - public void setScorer(Scorer scorer) throws IOException { - leafCollector.setScorer(scorer); - } - - @Override - public void collect(int doc) throws IOException { - leafCollector.collect(doc); - - if (++count >= maxCountHits) { - throw new EarlyTerminationException("early termination [CountBased]"); - } - } - - @Override - public void doSetNextReader(LeafReaderContext atomicReaderContext) throws IOException { - leafCollector = delegate.getLeafCollector(atomicReaderContext); - } - - @Override - public boolean needsScores() { - return delegate.needsScores(); - } - } - private Lucene() { } @@ -838,14 +760,16 @@ public void delete() { } /** - * Given a {@link Scorer}, return a {@link Bits} instance that will match + * Given a {@link ScorerSupplier}, return a {@link Bits} instance that will match * all documents contained in the set. Note that the returned {@link Bits} * instance MUST be consumed in order. 
*/ - public static Bits asSequentialAccessBits(final int maxDoc, @Nullable Scorer scorer) throws IOException { - if (scorer == null) { + public static Bits asSequentialAccessBits(final int maxDoc, @Nullable ScorerSupplier scorerSupplier) throws IOException { + if (scorerSupplier == null) { return new Bits.MatchNoBits(maxDoc); } + // Since we want bits, we need random-access + final Scorer scorer = scorerSupplier.get(true); // this never returns null final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator(); final DocIdSetIterator iterator; if (twoPhase == null) { diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java b/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java index 3a5d71d1fcd5c..e9db2928ca724 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java @@ -22,7 +22,7 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.FilterLeafCollector; import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.elasticsearch.common.lucene.Lucene; @@ -41,9 +41,9 @@ public FilteredCollector(Collector collector, Weight filter) { @Override public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { - final Scorer filterScorer = filter.scorer(context); + final ScorerSupplier filterScorerSupplier = filter.scorerSupplier(context); final LeafCollector in = collector.getLeafCollector(context); - final Bits bits = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorer); + final Bits bits = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorerSupplier); return new FilterLeafCollector(in) { @Override diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java index dbfc1f0af11b6..b8e1039b2df1d 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java @@ -164,6 +164,11 @@ public Query rewrite(IndexReader reader) throws IOException { } } if (terms.isEmpty()) { + if (sizeMinus1 == 0) { + // no prefix and the phrase query is empty + return Queries.newMatchNoDocsQuery("No terms supplied for " + MultiPhrasePrefixQuery.class.getName()); + } + // if the terms do not exist we could return a MatchNoDocsQuery but this would break the unified highlighter // which rewrites the query with an empty reader.
return new BooleanQuery.Builder() diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java index 2f2a70537c03d..40465dc6ece07 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.elasticsearch.common.io.stream.StreamInput; @@ -174,8 +175,8 @@ private FiltersFunctionFactorScorer functionScorer(LeafReaderContext context) th for (int i = 0; i < filterFunctions.length; i++) { FilterFunction filterFunction = filterFunctions[i]; functions[i] = filterFunction.function.getLeafScoreFunction(context); - Scorer filterScorer = filterWeights[i].scorer(context); - docSets[i] = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorer); + ScorerSupplier filterScorerSupplier = filterWeights[i].scorerSupplier(context); + docSets[i] = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorerSupplier); } return new FiltersFunctionFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, functions, docSets, combineFunction, needsScores); } @@ -200,7 +201,7 @@ public Explanation explain(LeafReaderContext context, int doc) throws IOExceptio List filterExplanations = new ArrayList<>(); for (int i = 0; i < filterFunctions.length; ++i) { Bits docSet = Lucene.asSequentialAccessBits(context.reader().maxDoc(), - filterWeights[i].scorer(context)); + filterWeights[i].scorerSupplier(context)); if (docSet.get(doc)) { FilterFunction filterFunction = filterFunctions[i]; Explanation functionExplanation = filterFunction.function.getLeafScoreFunction(context).explainScore(doc, expl); diff --git a/core/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/core/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index 68e2865e28452..69173cc4216cd 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/core/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -83,4 +83,8 @@ private void setLastCommittedTranslogGeneration(List comm public SnapshotDeletionPolicy getIndexDeletionPolicy() { return indexDeletionPolicy; } + + public TranslogDeletionPolicy getTranslogDeletionPolicy() { + return translogDeletionPolicy; + } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 7763c8d04a4e7..6e93d1feed5f8 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -1102,8 +1102,8 @@ public static class Delete extends Operation { public Delete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, VersionType versionType, Origin origin, long startTime) { super(uid, seqNo, primaryTerm, version, versionType, origin, startTime); - this.type = type; - this.id = id; + this.type = Objects.requireNonNull(type); + this.id = Objects.requireNonNull(id); } public Delete(String type, String id, Term uid) { diff --git 
a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 8c0481d686f41..f84f76b537e0d 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -305,7 +305,8 @@ private void recoverFromTranslogInternal() throws IOException { Translog.TranslogGeneration translogGeneration = translog.getGeneration(); final int opsRecovered; try { - Translog.Snapshot snapshot = translog.newSnapshot(); + final long translogGen = Long.parseLong(lastCommittedSegmentInfos.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); + Translog.Snapshot snapshot = translog.newSnapshot(translogGen); opsRecovered = config().getTranslogRecoveryRunner().run(this, snapshot); } catch (Exception e) { throw new EngineException(shardId, "failed to recover from translog", e); @@ -321,6 +322,8 @@ private void recoverFromTranslogInternal() throws IOException { } else if (translog.isCurrent(translogGeneration) == false) { commitIndexWriter(indexWriter, translog, lastCommittedSegmentInfos.getUserData().get(Engine.SYNC_COMMIT_ID)); } + // clean up what's not needed + translog.trimUnreferencedReaders(); } private Translog openTranslog(EngineConfig engineConfig, IndexWriter writer, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier) throws IOException { @@ -1772,7 +1775,7 @@ protected void doRun() throws Exception { * @param syncId the sync flush ID ({@code null} if not committing a synced flush) * @throws IOException if an I/O exception occurs committing the specified writer */ - private void commitIndexWriter(final IndexWriter writer, final Translog translog, @Nullable final String syncId) throws IOException { + protected void commitIndexWriter(final IndexWriter writer, final Translog translog, @Nullable final String syncId) throws IOException { ensureCanFlush(); try { final long localCheckpoint = seqNoService().getLocalCheckpoint(); diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java index 339e70c50b1c8..3ae4e6ebc8123 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java @@ -85,76 +85,6 @@ public final void sort(Comparator c) { throw new UnsupportedOperationException("doc values are unmodifiable"); } - public static final class Strings extends ScriptDocValues { - - private final SortedBinaryDocValues in; - private BytesRefBuilder[] values = new BytesRefBuilder[0]; - private int count; - - public Strings(SortedBinaryDocValues in) { - this.in = in; - } - - @Override - public void setNextDocId(int docId) throws IOException { - if (in.advanceExact(docId)) { - resize(in.docValueCount()); - for (int i = 0; i < count; i++) { - values[i].copyBytes(in.nextValue()); - } - } else { - resize(0); - } - } - - /** - * Set the {@link #size()} and ensure that the {@link #values} array can - * store at least that many entries.
- */ - protected void resize(int newSize) { - count = newSize; - if (newSize > values.length) { - final int oldLength = values.length; - values = ArrayUtil.grow(values, count); - for (int i = oldLength; i < values.length; ++i) { - values[i] = new BytesRefBuilder(); - } - } - } - - public SortedBinaryDocValues getInternalValues() { - return this.in; - } - - public BytesRef getBytesValue() { - if (size() > 0) { - return values[0].get(); - } else { - return null; - } - } - - public String getValue() { - BytesRef value = getBytesValue(); - if (value == null) { - return null; - } else { - return value.utf8ToString(); - } - } - - @Override - public String get(int index) { - return values[index].get().utf8ToString(); - } - - @Override - public int size() { - return count; - } - - } - public static final class Longs extends ScriptDocValues { protected static final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(Longs.class)); @@ -570,13 +500,13 @@ private static boolean[] grow(boolean[] array, int minSize) { } - public static final class BytesRefs extends ScriptDocValues { + abstract static class BinaryScriptDocValues extends ScriptDocValues { private final SortedBinaryDocValues in; - private BytesRef[] values; - private int count; + protected BytesRefBuilder[] values = new BytesRefBuilder[0]; + protected int count; - public BytesRefs(SortedBinaryDocValues in) { + BinaryScriptDocValues(SortedBinaryDocValues in) { this.in = in; } @@ -585,7 +515,10 @@ public void setNextDocId(int docId) throws IOException { if (in.advanceExact(docId)) { resize(in.docValueCount()); for (int i = 0; i < count; i++) { - values[i] = in.nextValue(); + // We need to make a copy here, because BytesBinaryDVAtomicFieldData's SortedBinaryDocValues + // implementation reuses the returned BytesRef. Otherwise we would end up with the same BytesRef + // instance for all slots in the values array. 
+ values[i].copyBytes(in.nextValue()); } } else { resize(0); @@ -598,32 +531,69 @@ public void setNextDocId(int docId) throws IOException { */ protected void resize(int newSize) { count = newSize; - if (values == null) { - values = new BytesRef[newSize]; - } else { + if (newSize > values.length) { + final int oldLength = values.length; values = ArrayUtil.grow(values, count); + for (int i = oldLength; i < values.length; ++i) { + values[i] = new BytesRefBuilder(); + } } } - public SortedBinaryDocValues getInternalValues() { - return this.in; + @Override + public int size() { + return count; } - public BytesRef getValue() { - if (count == 0) { - return new BytesRef(); + } + + public static final class Strings extends BinaryScriptDocValues { + + public Strings(SortedBinaryDocValues in) { + super(in); + } + + @Override + public String get(int index) { + return values[index].get().utf8ToString(); + } + + public BytesRef getBytesValue() { + if (size() > 0) { + return values[0].get(); + } else { + return null; + } + } + + public String getValue() { + BytesRef value = getBytesValue(); + if (value == null) { + return null; + } else { + return value.utf8ToString(); } - return values[0]; + } + + } + + public static final class BytesRefs extends BinaryScriptDocValues { + + public BytesRefs(SortedBinaryDocValues in) { + super(in); } @Override public BytesRef get(int index) { - return values[index]; + return values[index].get(); } - @Override - public int size() { - return count; + public BytesRef getValue() { + if (count == 0) { + return new BytesRef(); + } + return values[0].get(); } + } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java index f3001db39260a..0f88d3223edce 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java @@ -133,6 +133,7 @@ public MetadataFieldMapper.Builder parse(String name, Map n } parseTextField(builder, builder.name, node, parserContext); + boolean enabledSet = false; for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); String fieldName = entry.getKey(); @@ -140,9 +141,16 @@ public MetadataFieldMapper.Builder parse(String name, Map n if (fieldName.equals("enabled")) { boolean enabled = TypeParsers.nodeBooleanValueLenient(name, "enabled", fieldNode); builder.enabled(enabled ? 
EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED); + enabledSet = true; iterator.remove(); } } + if (enabledSet == false && parserContext.indexVersionCreated().before(Version.V_6_0_0_alpha1)) { + // So there is no "enabled" field, however, the index was created prior to 6.0, + // and therefore the default for this particular index should be "true" for + // enabling _all + builder.enabled(EnabledAttributeMapper.ENABLED); + } return builder; } @@ -150,7 +158,13 @@ public MetadataFieldMapper.Builder parse(String name, Map n public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); if (fieldType != null) { - return new AllFieldMapper(indexSettings, fieldType); + if (context.indexVersionCreated().before(Version.V_6_0_0_alpha1)) { + // The index was created prior to 6.0, and therefore the default for this + // particular index should be "true" for enabling _all + return new AllFieldMapper(fieldType.clone(), EnabledAttributeMapper.ENABLED, indexSettings); + } else { + return new AllFieldMapper(indexSettings, fieldType); + } } else { return parse(NAME, Collections.emptyMap(), context) .build(new BuilderContext(indexSettings, new ContentPath(1))); @@ -197,7 +211,6 @@ private AllFieldMapper(Settings indexSettings, MappedFieldType existing) { private AllFieldMapper(MappedFieldType fieldType, EnabledAttributeMapper enabled, Settings indexSettings) { super(NAME, fieldType, Defaults.FIELD_TYPE, indexSettings); this.enabledState = enabled; - } public boolean enabled() { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 489f4702bc36c..c2de26c96b38f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -424,15 +424,33 @@ private static ParseContext nestedContext(ParseContext context, ObjectMapper map context = context.createNestedContext(mapper.fullPath()); ParseContext.Document nestedDoc = context.doc(); ParseContext.Document parentDoc = nestedDoc.getParent(); - // pre add the uid field if possible (id was already provided) - IndexableField uidField = parentDoc.getField(UidFieldMapper.NAME); - if (uidField != null) { - // we don't need to add it as a full uid field in nested docs, since we don't need versioning - // we also rely on this for UidField#loadVersion - // this is a deeply nested field - nestedDoc.add(new Field(UidFieldMapper.NAME, uidField.stringValue(), UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + // We need to add the uid or id to this nested Lucene document too. + // If we do not do this then when a document gets deleted only the root Lucene document gets deleted and + // not the nested Lucene documents! Besides the fact that we would have zombie Lucene documents, the ordering of + // documents inside the Lucene index (document blocks) will be incorrect, as nested documents of different root + // documents are then aligned with other root documents. This will cause the nested query, sorting, aggregations + // and inner hits to fail or yield incorrect results.
+ if (context.mapperService().getIndexSettings().isSingleType()) { + IndexableField idField = parentDoc.getField(IdFieldMapper.NAME); + if (idField != null) { + // We just need to store the id as indexed field, so that IndexWriter#deleteDocuments(term) can then + // delete it when the root document is deleted too. + nestedDoc.add(new Field(IdFieldMapper.NAME, idField.stringValue(), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); + } else { + throw new IllegalStateException("The root document of a nested document should have an id field"); + } + } else { + IndexableField uidField = parentDoc.getField(UidFieldMapper.NAME); + if (uidField != null) { + // We just need to store the uid as indexed field, so that IndexWriter#deleteDocuments(term) can then + // delete it when the root document is deleted too. + nestedDoc.add(new Field(UidFieldMapper.NAME, uidField.stringValue(), UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + } else { + throw new IllegalStateException("The root document of a nested document should have an uid field"); + } } + // the type of the nested doc starts with __, so we can identify that it's a nested one in filters // note, we don't prefix it with the type of the doc since it allows us to execute a nested query // across types (for example, with similar nested objects) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java index a9a765f1c3a0e..813a546aaed36 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java @@ -52,6 +52,7 @@ public static class Defaults { public static final String NAME = IdFieldMapper.NAME; public static final MappedFieldType FIELD_TYPE = new IdFieldType(); + public static final MappedFieldType NESTED_FIELD_TYPE; static { FIELD_TYPE.setTokenized(false); @@ -62,6 +63,10 @@ public static class Defaults { FIELD_TYPE.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); FIELD_TYPE.setName(NAME); FIELD_TYPE.freeze(); + + NESTED_FIELD_TYPE = FIELD_TYPE.clone(); + NESTED_FIELD_TYPE.setStored(false); + NESTED_FIELD_TYPE.freeze(); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java index 2ed6658e87c65..c18b66cf61855 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java @@ -134,6 +134,10 @@ protected void parseCreateField(ParseContext context, List field value = context.parser().textOrNull(); } + if (value == null && fieldType().nullValue() == null) { + return; + } + final int tokenCount; if (value == null) { tokenCount = (Integer) fieldType().nullValue(); diff --git a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java index 9899ba9a748cc..f6e7dd32eb233 100644 --- a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; @@
-36,6 +37,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import java.io.IOException; +import java.util.Iterator; import java.util.Map; import java.util.Objects; import java.util.List; @@ -79,18 +81,21 @@ protected Query newTermQuery(Term term) { @Override public Query newDefaultQuery(String text) { - BooleanQuery.Builder bq = new BooleanQuery.Builder(); + List disjuncts = new ArrayList<>(); for (Map.Entry entry : weights.entrySet()) { try { Query q = createBooleanQuery(entry.getKey(), text, super.getDefaultOperator()); if (q != null) { - bq.add(wrapWithBoost(q, entry.getValue()), BooleanClause.Occur.SHOULD); + disjuncts.add(wrapWithBoost(q, entry.getValue())); } } catch (RuntimeException e) { rethrowUnlessLenient(e); } } - return super.simplify(bq.build()); + if (disjuncts.size() == 1) { + return disjuncts.get(0); + } + return new DisjunctionMaxQuery(disjuncts, 1.0f); } /** @@ -99,23 +104,26 @@ public Query newDefaultQuery(String text) { */ @Override public Query newFuzzyQuery(String text, int fuzziness) { - BooleanQuery.Builder bq = new BooleanQuery.Builder(); + List disjuncts = new ArrayList<>(); for (Map.Entry entry : weights.entrySet()) { final String fieldName = entry.getKey(); try { final BytesRef term = getAnalyzer().normalize(fieldName, text); Query query = new FuzzyQuery(new Term(fieldName, term), fuzziness); - bq.add(wrapWithBoost(query, entry.getValue()), BooleanClause.Occur.SHOULD); + disjuncts.add(wrapWithBoost(query, entry.getValue())); } catch (RuntimeException e) { rethrowUnlessLenient(e); } } - return super.simplify(bq.build()); + if (disjuncts.size() == 1) { + return disjuncts.get(0); + } + return new DisjunctionMaxQuery(disjuncts, 1.0f); } @Override public Query newPhraseQuery(String text, int slop) { - BooleanQuery.Builder bq = new BooleanQuery.Builder(); + List disjuncts = new ArrayList<>(); for (Map.Entry entry : weights.entrySet()) { try { String field = entry.getKey(); @@ -129,13 +137,16 @@ public Query newPhraseQuery(String text, int slop) { Float boost = entry.getValue(); Query q = createPhraseQuery(field, text, slop); if (q != null) { - bq.add(wrapWithBoost(q, boost), BooleanClause.Occur.SHOULD); + disjuncts.add(wrapWithBoost(q, boost)); } } catch (RuntimeException e) { rethrowUnlessLenient(e); } } - return super.simplify(bq.build()); + if (disjuncts.size() == 1) { + return disjuncts.get(0); + } + return new DisjunctionMaxQuery(disjuncts, 1.0f); } /** @@ -144,25 +155,28 @@ public Query newPhraseQuery(String text, int slop) { */ @Override public Query newPrefixQuery(String text) { - BooleanQuery.Builder bq = new BooleanQuery.Builder(); + List disjuncts = new ArrayList<>(); for (Map.Entry entry : weights.entrySet()) { final String fieldName = entry.getKey(); try { if (settings.analyzeWildcard()) { Query analyzedQuery = newPossiblyAnalyzedQuery(fieldName, text); if (analyzedQuery != null) { - bq.add(wrapWithBoost(analyzedQuery, entry.getValue()), BooleanClause.Occur.SHOULD); + disjuncts.add(wrapWithBoost(analyzedQuery, entry.getValue())); } } else { Term term = new Term(fieldName, getAnalyzer().normalize(fieldName, text)); Query query = new PrefixQuery(term); - bq.add(wrapWithBoost(query, entry.getValue()), BooleanClause.Occur.SHOULD); + disjuncts.add(wrapWithBoost(query, entry.getValue())); } } catch (RuntimeException e) { return rethrowUnlessLenient(e); } } - return super.simplify(bq.build()); + if (disjuncts.size() == 1) { + return disjuncts.get(0); + } + return new DisjunctionMaxQuery(disjuncts, 1.0f); } /** diff --git 
a/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 7c1f91d3587d5..944e27ed2f59d 100644 --- a/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -22,9 +22,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.Term; import org.apache.lucene.queries.BlendedTermQuery; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanClause.Occur; -import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchNoDocsQuery; @@ -84,7 +81,7 @@ public Query parse(MultiMatchQueryBuilder.Type type, Map fieldNam queryBuilder = new QueryBuilder(tieBreaker); break; case CROSS_FIELDS: - queryBuilder = new CrossFieldsQueryBuilder(tieBreaker); + queryBuilder = new CrossFieldsQueryBuilder(); break; default: throw new IllegalStateException("No such type: " + type); @@ -99,15 +96,9 @@ public Query parse(MultiMatchQueryBuilder.Type type, Map fieldNam private QueryBuilder queryBuilder; public class QueryBuilder { - protected final boolean groupDismax; protected final float tieBreaker; public QueryBuilder(float tieBreaker) { - this(tieBreaker != 1.0f, tieBreaker); - } - - public QueryBuilder(boolean groupDismax, float tieBreaker) { - this.groupDismax = groupDismax; this.tieBreaker = tieBreaker; } @@ -134,19 +125,11 @@ private Query combineGrouped(List groupQuery) { if (groupQuery.size() == 1) { return groupQuery.get(0); } - if (groupDismax) { - List queries = new ArrayList<>(); - for (Query query : groupQuery) { - queries.add(query); - } - return new DisjunctionMaxQuery(queries, tieBreaker); - } else { - final BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder(); - for (Query query : groupQuery) { - booleanQuery.add(query, BooleanClause.Occur.SHOULD); - } - return booleanQuery.build(); + List queries = new ArrayList<>(); + for (Query query : groupQuery) { + queries.add(query); } + return new DisjunctionMaxQuery(queries, tieBreaker); } public Query blendTerm(Term term, MappedFieldType fieldType) { @@ -165,8 +148,8 @@ public Query termQuery(MappedFieldType fieldType, Object value) { final class CrossFieldsQueryBuilder extends QueryBuilder { private FieldAndFieldType[] blendedFields; - CrossFieldsQueryBuilder(float tieBreaker) { - super(false, tieBreaker); + CrossFieldsQueryBuilder() { + super(0.0f); } @Override @@ -306,8 +289,6 @@ static Query blendTerms(QueryShardContext context, BytesRef[] values, Float comm blendedBoost = Arrays.copyOf(blendedBoost, i); if (commonTermsCutoff != null) { queries.add(BlendedTermQuery.commonTermsBlendedQuery(terms, blendedBoost, commonTermsCutoff)); - } else if (tieBreaker == 1.0f) { - queries.add(BlendedTermQuery.booleanBlendedQuery(terms, blendedBoost)); } else { queries.add(BlendedTermQuery.dismaxBlendedQuery(terms, blendedBoost, tieBreaker)); } @@ -318,11 +299,7 @@ static Query blendTerms(QueryShardContext context, BytesRef[] values, Float comm // best effort: add clauses that are not term queries so that they have an opportunity to match // however their score contribution will be different // TODO: can we improve this? 
- BooleanQuery.Builder bq = new BooleanQuery.Builder(); - for (Query query : queries) { - bq.add(query, Occur.SHOULD); - } - return bq.build(); + return new DisjunctionMaxQuery(queries, 1.0f); } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index e287278e1c519..90d127c4a5cb3 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -642,10 +642,11 @@ private Engine.IndexResult index(Engine engine, Engine.Index index) throws IOExc return result; } - public Engine.NoOp prepareMarkingSeqNoAsNoOp(long seqNo, String reason) { + public Engine.NoOp prepareMarkingSeqNoAsNoOpOnReplica(long seqNo, long opPrimaryTerm, String reason) { verifyReplicationTarget(); + assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]"; long startTime = System.nanoTime(); - return new Engine.NoOp(seqNo, primaryTerm, Engine.Operation.Origin.REPLICA, startTime, reason); + return new Engine.NoOp(seqNo, opPrimaryTerm, Engine.Operation.Origin.REPLICA, startTime, reason); } public Engine.NoOpResult markSeqNoAsNoOp(Engine.NoOp noOp) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java b/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java index ce5cc8e76010b..547d5aa499fb3 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java @@ -44,6 +44,7 @@ final class Checkpoint { final long minSeqNo; final long maxSeqNo; final long globalCheckpoint; + final long minTranslogGeneration; private static final int INITIAL_VERSION = 1; // start with 1, just to recognize there was some magic serialization logic before private static final int CURRENT_VERSION = 2; // introduction of global checkpoints @@ -58,6 +59,7 @@ final class Checkpoint { + Long.BYTES // minimum sequence number, introduced in 6.0.0 + Long.BYTES // maximum sequence number, introduced in 6.0.0 + Long.BYTES // global checkpoint, introduced in 6.0.0 + + Long.BYTES // minimum translog generation in the translog - introduced in 6.0.0 + CodecUtil.footerLength(); // size of 5.0.0 checkpoint @@ -76,15 +78,19 @@ final class Checkpoint { * @param minSeqNo the current minimum sequence number of all operations in the translog * @param maxSeqNo the current maximum sequence number of all operations in the translog * @param globalCheckpoint the last-known global checkpoint + * @param minTranslogGeneration the minimum generation referenced by the translog at this moment. 
*/ - Checkpoint(long offset, int numOps, long generation, long minSeqNo, long maxSeqNo, long globalCheckpoint) { - assert minSeqNo <= maxSeqNo; + Checkpoint(long offset, int numOps, long generation, long minSeqNo, long maxSeqNo, long globalCheckpoint, long minTranslogGeneration) { + assert minSeqNo <= maxSeqNo : "minSeqNo [" + minSeqNo + "] is higher than maxSeqNo [" + maxSeqNo + "]"; + assert minTranslogGeneration <= generation : + "minTranslogGen [" + minTranslogGeneration + "] is higher than generation [" + generation + "]"; this.offset = offset; this.numOps = numOps; this.generation = generation; this.minSeqNo = minSeqNo; this.maxSeqNo = maxSeqNo; this.globalCheckpoint = globalCheckpoint; + this.minTranslogGeneration = minTranslogGeneration; } private void write(DataOutput out) throws IOException { @@ -94,16 +100,18 @@ private void write(DataOutput out) throws IOException { out.writeLong(minSeqNo); out.writeLong(maxSeqNo); out.writeLong(globalCheckpoint); + out.writeLong(minTranslogGeneration); } - static Checkpoint emptyTranslogCheckpoint(final long offset, final long generation, final long globalCheckpoint) { + static Checkpoint emptyTranslogCheckpoint(final long offset, final long generation, final long globalCheckpoint, + long minTranslogGeneration) { final long minSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; final long maxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; - return new Checkpoint(offset, 0, generation, minSeqNo, maxSeqNo, globalCheckpoint); + return new Checkpoint(offset, 0, generation, minSeqNo, maxSeqNo, globalCheckpoint, minTranslogGeneration); } static Checkpoint readCheckpointV6_0_0(final DataInput in) throws IOException { - return new Checkpoint(in.readLong(), in.readInt(), in.readLong(), in.readLong(), in.readLong(), in.readLong()); + return new Checkpoint(in.readLong(), in.readInt(), in.readLong(), in.readLong(), in.readLong(), in.readLong(), in.readLong()); } // reads a checksummed checkpoint introduced in ES 5.0.0 @@ -111,7 +119,8 @@ static Checkpoint readCheckpointV5_0_0(final DataInput in) throws IOException { final long minSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; final long maxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; final long globalCheckpoint = SequenceNumbersService.UNASSIGNED_SEQ_NO; - return new Checkpoint(in.readLong(), in.readInt(), in.readLong(), minSeqNo, maxSeqNo, globalCheckpoint); + final long minTranslogGeneration = -1L; + return new Checkpoint(in.readLong(), in.readInt(), in.readLong(), minSeqNo, maxSeqNo, globalCheckpoint, minTranslogGeneration); } @Override @@ -123,6 +132,7 @@ public String toString() { ", minSeqNo=" + minSeqNo + ", maxSeqNo=" + maxSeqNo + ", globalCheckpoint=" + globalCheckpoint + + ", minTranslogGeneration=" + minTranslogGeneration + '}'; } diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index c351f0346236e..d4a5fe0d99fd7 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -24,6 +24,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; @@ -41,6 +42,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import 
org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShardComponent; @@ -55,7 +57,9 @@ import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.Collections; +import java.util.Iterator; import java.util.List; +import java.util.Objects; import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; @@ -63,7 +67,6 @@ import java.util.function.LongSupplier; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.stream.Collectors; import java.util.stream.Stream; /** @@ -170,11 +173,12 @@ public Translog( && Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning logger.warn("deleted previously created, but not yet committed, next generation [{}]. This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName()); } - this.readers.addAll(recoverFromFiles(deletionPolicy.getMinTranslogGenerationForRecovery(), checkpoint)); + this.readers.addAll(recoverFromFiles(checkpoint)); if (readers.isEmpty()) { throw new IllegalStateException("at least one reader must be recovered"); } boolean success = false; + current = null; try { current = createWriter(checkpoint.generation + 1); success = true; @@ -192,14 +196,13 @@ public Translog( final long generation = deletionPolicy.getMinTranslogGenerationForRecovery(); logger.debug("wipe translog location - creating new translog, starting generation [{}]", generation); Files.createDirectories(location); - final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, generation, globalCheckpointSupplier.getAsLong()); + final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, generation, globalCheckpointSupplier.getAsLong(), generation); final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME); Checkpoint.write(getChannelFactory(), checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); IOUtils.fsync(checkpointFile, false); - current = createWriter(generation); - + current = createWriter(generation, generation); + readers.clear(); } - // now that we know which files are there, create a new current one. } catch (Exception e) { // close the opened translog files if we fail to create a new translog... 
IOUtils.closeWhileHandlingException(current); @@ -209,29 +212,46 @@ } /** recover all translog files found on disk */ - private ArrayList recoverFromFiles(long translogFileGeneration, Checkpoint checkpoint) throws IOException { + private ArrayList recoverFromFiles(Checkpoint checkpoint) throws IOException { boolean success = false; ArrayList foundTranslogs = new ArrayList<>(); final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, TRANSLOG_FILE_SUFFIX); // a temp file to copy checkpoint to - note it must be on the same FS otherwise atomic move won't work boolean tempFileRenamed = false; try (ReleasableLock lock = writeLock.acquire()) { logger.debug("open uncommitted translog checkpoint {}", checkpoint); + + final long minGenerationToRecoverFrom; + if (checkpoint.minTranslogGeneration < 0) { + final Version indexVersionCreated = indexSettings().getIndexVersionCreated(); + assert indexVersionCreated.before(Version.V_6_0_0_alpha3) : + "no minTranslogGeneration in checkpoint, but index was created with version [" + indexVersionCreated + "]"; + minGenerationToRecoverFrom = deletionPolicy.getMinTranslogGenerationForRecovery(); + } else { + minGenerationToRecoverFrom = checkpoint.minTranslogGeneration; + } + final String checkpointTranslogFile = getFilename(checkpoint.generation); // we open files in reverse order in order to validate translog uuid before we start traversing the translog based on // the generation id we found in the lucene commit. This gives better error messages if the wrong // translog was found. foundTranslogs.add(openReader(location.resolve(checkpointTranslogFile), checkpoint)); - for (long i = checkpoint.generation - 1; i >= translogFileGeneration; i--) { + for (long i = checkpoint.generation - 1; i >= minGenerationToRecoverFrom; i--) { Path committedTranslogFile = location.resolve(getFilename(i)); if (Files.exists(committedTranslogFile) == false) { throw new IllegalStateException("translog file doesn't exist with generation: " + i + " recovering from: " + - translogFileGeneration + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive"); + minGenerationToRecoverFrom + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive"); } final TranslogReader reader = openReader(committedTranslogFile, Checkpoint.read(location.resolve(getCommitCheckpointFileName(i)))); foundTranslogs.add(reader); logger.debug("recovered local translog from checkpoint {}", checkpoint); } Collections.reverse(foundTranslogs); + + // when we clean up files, we first update the checkpoint with a new minReferencedTranslog and then delete them; + // if we crash just at the wrong moment, it may be that we leave one unreferenced file behind, so we delete it if it is there + IOUtils.deleteFilesIgnoringExceptions(location.resolve(getFilename(minGenerationToRecoverFrom - 1)), + location.resolve(getCommitCheckpointFileName(minGenerationToRecoverFrom - 1))); + Path commitCheckpoint = location.resolve(getCommitCheckpointFileName(checkpoint.generation)); if (Files.exists(commitCheckpoint)) { Checkpoint checkpointFromDisk = Checkpoint.read(commitCheckpoint); @@ -332,6 +352,20 @@ public long currentFileGeneration() { } } + /** + * Returns the minimum file generation referenced by the translog + */ + long getMinFileGeneration() { + try (ReleasableLock ignored = readLock.acquire()) { + if (readers.isEmpty()) { + return current.getGeneration(); + } else { + return readers.get(0).getGeneration(); + } + } + } + + /** * Returns the number of
/** * Returns the number of operations in the transaction files that aren't committed to Lucene. */ @@ -372,7 +406,6 @@ private long sizeInBytes(long minGeneration) { } } - /** * Creates a new translog for the specified generation. * @@ -381,6 +414,18 @@ private long sizeInBytes(long minGeneration) { * @throws IOException if creating the translog failed */ TranslogWriter createWriter(long fileGeneration) throws IOException { + return createWriter(fileGeneration, getMinFileGeneration()); + } + + /** + * Creates a new writer. + * + * @param fileGeneration the generation of the writer to be created + * @param initialMinTranslogGen the minimum translog generation to be written in the first checkpoint. This is + * needed to solve an initialization problem while constructing an empty translog. + * With no readers and no current, a call to {@link #getMinFileGeneration()} would not work. + */ + private TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen) throws IOException { final TranslogWriter newFile; try { newFile = TranslogWriter.create( @@ -390,7 +435,9 @@ TranslogWriter createWriter(long fileGeneration) throws IOException { location.resolve(getFilename(fileGeneration)), getChannelFactory(), config.getBufferSize(), - globalCheckpointSupplier); + globalCheckpointSupplier, + initialMinTranslogGen, + this::getMinFileGeneration); } catch (final IOException e) { throw new TranslogException(shardId, "failed to create new translog file", e); } @@ -494,12 +541,18 @@ public long getLastSyncedGlobalCheckpoint() { * Snapshots are fixed in time and will not be updated with future operations. */ public Snapshot newSnapshot() { - return createSnapshot(Long.MIN_VALUE); + try (ReleasableLock ignored = readLock.acquire()) { + return newSnapshot(getMinFileGeneration()); + } } - private Snapshot createSnapshot(long minGeneration) { + public Snapshot newSnapshot(long minGeneration) { try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); + if (minGeneration < getMinFileGeneration()) { + throw new IllegalArgumentException("requested snapshot generation [" + minGeneration + "] is not available. 
" + + "Min referenced generation is [" + getMinFileGeneration() + "]"); + } Snapshot[] snapshots = Stream.concat(readers.stream(), Stream.of(current)) .filter(reader -> reader.getGeneration() >= minGeneration) .map(BaseTranslogReader::newSnapshot).toArray(Snapshot[]::new); @@ -673,7 +726,7 @@ public long sizeInBytes() { /** create a snapshot from this view */ public Snapshot snapshot() { ensureOpen(); - return Translog.this.createSnapshot(minGeneration); + return Translog.this.newSnapshot(minGeneration); } void ensureOpen() { @@ -868,8 +921,8 @@ public static class Index implements Operation { private final String id; private final long autoGeneratedIdTimestamp; private final String type; - private long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; - private long primaryTerm = 0; + private final long seqNo; + private final long primaryTerm; private final long version; private final VersionType versionType; private final BytesReference source; @@ -899,6 +952,9 @@ public Index(StreamInput in) throws IOException { if (format >= FORMAT_SEQ_NO) { seqNo = in.readLong(); primaryTerm = in.readLong(); + } else { + seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + primaryTerm = 0; } } @@ -925,6 +981,7 @@ public Index(String type, String id, long seqNo, long version, VersionType versi this.id = id; this.source = new BytesArray(source); this.seqNo = seqNo; + this.primaryTerm = 0; this.version = version; this.versionType = versionType; this.routing = routing; @@ -1062,27 +1119,42 @@ public long getAutoGeneratedIdTimestamp() { public static class Delete implements Operation { - private static final int FORMAT_5_X = 2; - private static final int FORMAT_SEQ_NO = FORMAT_5_X + 1; + public static final int FORMAT_5_0 = 2; // 5.0 - 5.5 + private static final int FORMAT_SINGLE_TYPE = FORMAT_5_0 + 1; // 5.5 - 6.0 + private static final int FORMAT_SEQ_NO = FORMAT_SINGLE_TYPE + 1; // 6.0 - * public static final int SERIALIZATION_FORMAT = FORMAT_SEQ_NO; - private String type, id; - private Term uid; - private long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; - private long primaryTerm = 0; - private long version = Versions.MATCH_ANY; - private VersionType versionType = VersionType.INTERNAL; + private final String type, id; + private final Term uid; + private final long seqNo; + private final long primaryTerm; + private final long version; + private final VersionType versionType; public Delete(StreamInput in) throws IOException { final int format = in.readVInt();// SERIALIZATION_FORMAT - assert format >= FORMAT_5_X : "format was: " + format; - uid = new Term(in.readString(), in.readString()); + assert format >= FORMAT_5_0 : "format was: " + format; + if (format >= FORMAT_SINGLE_TYPE) { + type = in.readString(); + id = in.readString(); + uid = new Term(in.readString(), in.readString()); + } else { + uid = new Term(in.readString(), in.readString()); + // the uid was constructed from the type and id so we can + // extract them back + Uid uidObject = Uid.createUid(uid.text()); + type = uidObject.type(); + id = uidObject.id(); + } this.version = in.readLong(); this.versionType = VersionType.fromValue(in.readByte()); assert versionType.validateVersionForWrites(this.version); if (format >= FORMAT_SEQ_NO) { seqNo = in.readLong(); primaryTerm = in.readLong(); + } else { + seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + primaryTerm = 0; } } @@ -1096,8 +1168,8 @@ public Delete(String type, String id, long seqNo, Term uid) { } public Delete(String type, String id, Term uid, long seqNo, long primaryTerm, long 
version, VersionType versionType) { - this.type = type; - this.id = id; + this.type = Objects.requireNonNull(type); + this.id = Objects.requireNonNull(id); this.uid = uid; this.seqNo = seqNo; this.primaryTerm = primaryTerm; @@ -1153,6 +1225,8 @@ public Source getSource() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(SERIALIZATION_FORMAT); + out.writeString(type); + out.writeString(id); out.writeString(uid.field()); out.writeString(uid.text()); out.writeLong(version); @@ -1442,30 +1516,58 @@ public void rollGeneration() throws IOException { * Trims unreferenced translog generations by asking {@link TranslogDeletionPolicy} for the minimum * required generation */ - public void trimUnreferencedReaders() { + public void trimUnreferencedReaders() throws IOException { try (ReleasableLock ignored = writeLock.acquire()) { if (closed.get()) { // we're shut down, potentially on some tragic event; don't delete anything return; } long minReferencedGen = deletionPolicy.minTranslogGenRequired(); - final long minExistingGen = readers.isEmpty() ? current.getGeneration() : readers.get(0).getGeneration(); - assert minReferencedGen >= minExistingGen : + assert minReferencedGen >= getMinFileGeneration() : "deletion policy requires a minReferenceGen of [" + minReferencedGen + "] but the lowest gen available is [" - + minExistingGen + "]"; - final List<TranslogReader> unreferenced = - readers.stream().filter(r -> r.getGeneration() < minReferencedGen).collect(Collectors.toList()); - for (final TranslogReader unreferencedReader : unreferenced) { - final Path translogPath = unreferencedReader.path(); + + getMinFileGeneration() + "]"; + assert minReferencedGen <= currentFileGeneration() : + "deletion policy requires a minReferenceGen of [" + minReferencedGen + "] which is higher than the current generation [" + + currentFileGeneration() + "]"; + + + for (Iterator<TranslogReader> iterator = readers.iterator(); iterator.hasNext(); ) { + TranslogReader reader = iterator.next(); + if (reader.getGeneration() >= minReferencedGen) { + break; + } + iterator.remove(); + IOUtils.closeWhileHandlingException(reader); + final Path translogPath = reader.path(); logger.trace("delete translog file [{}], not referenced and not current anymore", translogPath); - IOUtils.closeWhileHandlingException(unreferencedReader); - IOUtils.deleteFilesIgnoringExceptions(translogPath, - translogPath.resolveSibling(getCommitCheckpointFileName(unreferencedReader.getGeneration()))); + // The checkpoint is used when opening the translog to know which files should be recovered from. + // We now update the checkpoint to ignore the file we are going to remove. + // Note that there is a provision in recoverFromFiles to allow for the case where we synced the checkpoint + // but crashed before we could delete the file. + current.sync(); + deleteReaderFiles(reader); } - readers.removeAll(unreferenced); + assert readers.isEmpty() == false || current.generation == minReferencedGen : + "all readers were cleaned but the minReferenceGen [" + minReferencedGen + "] is not the current writer's gen [" + + current.generation + "]"; + } catch (Exception ex) { + try { + closeOnTragicEvent(ex); + } catch (final Exception inner) { + ex.addSuppressed(inner); + } + throw ex; } } + /** + * Deletes all files associated with a reader; 
package-private to be able to simulate node failures at this point + */ + void deleteReaderFiles(TranslogReader reader) { + IOUtils.deleteFilesIgnoringExceptions(reader.path(), + reader.path().resolveSibling(getCommitCheckpointFileName(reader.getGeneration()))); + } + void closeFilesIfNoPendingViews() throws IOException { try (ReleasableLock ignored = writeLock.acquire()) { if (closed.get() && deletionPolicy.pendingViewsCount() == 0) { diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 4a98365e02fba..d637c9da79f65 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -71,6 +71,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { private volatile long maxSeqNo; private final LongSupplier globalCheckpointSupplier; + private final LongSupplier minTranslogGenerationSupplier; protected final AtomicBoolean closed = new AtomicBoolean(false); // lock order synchronized(syncLock) -> synchronized(this) @@ -85,10 +86,11 @@ private TranslogWriter( final FileChannel channel, final Path path, final ByteSizeValue bufferSize, - final LongSupplier globalCheckpointSupplier) throws IOException { + final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier) throws IOException { super(initialCheckpoint.generation, channel, path, channel.position()); this.shardId = shardId; this.channelFactory = channelFactory; + this.minTranslogGenerationSupplier = minTranslogGenerationSupplier; this.outputStream = new BufferedChannelOutputStream(java.nio.channels.Channels.newOutputStream(channel), bufferSize.bytesAsInt()); this.lastSyncedCheckpoint = initialCheckpoint; this.totalOffset = initialCheckpoint.offset; @@ -121,7 +123,9 @@ public static TranslogWriter create( Path file, ChannelFactory channelFactory, ByteSizeValue bufferSize, - final LongSupplier globalCheckpointSupplier) throws IOException { + final LongSupplier globalCheckpointSupplier, + final long initialMinTranslogGen, + final LongSupplier minTranslogGenerationSupplier) throws IOException { final BytesRef ref = new BytesRef(translogUUID); final int headerLength = getHeaderLength(ref.length); final FileChannel channel = channelFactory.open(file); @@ -132,9 +136,11 @@ public static TranslogWriter create( writeHeader(out, ref); channel.force(true); final Checkpoint checkpoint = - Checkpoint.emptyTranslogCheckpoint(headerLength, fileGeneration, globalCheckpointSupplier.getAsLong()); + Checkpoint.emptyTranslogCheckpoint(headerLength, fileGeneration, globalCheckpointSupplier.getAsLong(), + initialMinTranslogGen); writeCheckpoint(channelFactory, file.getParent(), checkpoint); - return new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize, globalCheckpointSupplier); + return new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize, globalCheckpointSupplier, + minTranslogGenerationSupplier); } catch (Exception exception) { // if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that // file exists we remove it. 
We only apply this logic to the checkpoint.generation+1; any other file with a higher generation is an error condition @@ -242,7 +248,9 @@ public void sync() throws IOException { * checkpoint has not yet been fsynced */ public boolean syncNeeded() { - return totalOffset != lastSyncedCheckpoint.offset || globalCheckpointSupplier.getAsLong() != lastSyncedCheckpoint.globalCheckpoint; + return totalOffset != lastSyncedCheckpoint.offset || + globalCheckpointSupplier.getAsLong() != lastSyncedCheckpoint.globalCheckpoint || + minTranslogGenerationSupplier.getAsLong() != lastSyncedCheckpoint.minTranslogGeneration; } @Override @@ -330,6 +338,7 @@ public boolean syncUpTo(long offset) throws IOException { final long currentMinSeqNo; final long currentMaxSeqNo; final long currentGlobalCheckpoint; + final long currentMinTranslogGeneration; synchronized (this) { ensureOpen(); try { @@ -339,6 +348,7 @@ public boolean syncUpTo(long offset) throws IOException { currentMinSeqNo = minSeqNo; currentMaxSeqNo = maxSeqNo; currentGlobalCheckpoint = globalCheckpointSupplier.getAsLong(); + currentMinTranslogGeneration = minTranslogGenerationSupplier.getAsLong(); } catch (Exception ex) { try { closeWithTragicEvent(ex); @@ -354,7 +364,8 @@ public boolean syncUpTo(long offset) throws IOException { try { channel.force(false); checkpoint = - writeCheckpoint(channelFactory, offsetToSync, opsCounter, currentMinSeqNo, currentMaxSeqNo, currentGlobalCheckpoint, path.getParent(), generation); + writeCheckpoint(channelFactory, offsetToSync, opsCounter, currentMinSeqNo, currentMaxSeqNo, + currentGlobalCheckpoint, currentMinTranslogGeneration, path.getParent(), generation); } catch (Exception ex) { try { closeWithTragicEvent(ex); @@ -398,9 +409,11 @@ private static Checkpoint writeCheckpoint( long minSeqNo, long maxSeqNo, long globalCheckpoint, + long minTranslogGeneration, Path translogFile, long generation) throws IOException { - final Checkpoint checkpoint = new Checkpoint(syncPosition, numOperations, generation, minSeqNo, maxSeqNo, globalCheckpoint); + final Checkpoint checkpoint = + new Checkpoint(syncPosition, numOperations, generation, minSeqNo, maxSeqNo, globalCheckpoint, minTranslogGeneration); writeCheckpoint(channelFactory, translogFile, checkpoint); return checkpoint; } diff --git a/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java b/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java index ea1f4c13dfd6a..408691692cacf 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java @@ -168,8 +168,8 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th /** Write a checkpoint file to the given location with the given generation */ public static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration) throws IOException { - Checkpoint emptyCheckpoint = - Checkpoint.emptyTranslogCheckpoint(translogLength, translogGeneration, SequenceNumbersService.UNASSIGNED_SEQ_NO); + Checkpoint emptyCheckpoint = Checkpoint.emptyTranslogCheckpoint(translogLength, translogGeneration, + SequenceNumbersService.UNASSIGNED_SEQ_NO, translogGeneration); Checkpoint.write(FileChannel::open, filename, emptyCheckpoint, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW); // fsync with metadata here to make sure. 
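Taken together, the translog changes above thread a minTranslogGeneration through every checkpoint: TranslogWriter seeds it at creation time, keeps it fresh via the supplier on each sync, and syncNeeded() now reports true when it drifts. A minimal sketch of that three-way staleness check, assuming a checkpoint is a plain value object (the field names mirror the diff, the class itself is illustrative):

    final class CheckpointSketch {
        final long offset;
        final long globalCheckpoint;
        final long minTranslogGeneration;

        CheckpointSketch(long offset, long globalCheckpoint, long minTranslogGeneration) {
            this.offset = offset;
            this.globalCheckpoint = globalCheckpoint;
            this.minTranslogGeneration = minTranslogGeneration;
        }

        // A new checkpoint must be fsynced when any tracked value has moved since the last sync.
        static boolean syncNeeded(CheckpointSketch lastSynced, long totalOffset,
                                  long currentGlobalCheckpoint, long currentMinTranslogGen) {
            return totalOffset != lastSynced.offset ||
                    currentGlobalCheckpoint != lastSynced.globalCheckpoint ||
                    currentMinTranslogGen != lastSynced.minTranslogGeneration;
        }
    }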
diff --git a/core/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java b/core/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java index 08669188a9fdb..25772d2d9a4d0 100644 --- a/core/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java +++ b/core/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java @@ -22,6 +22,10 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.TemplateScript; import java.util.ArrayList; import java.util.Arrays; @@ -29,6 +33,8 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.script.Script.DEFAULT_TEMPLATE_LANG; + public final class ConfigurationUtils { public static final String TAG_KEY = "tag"; @@ -265,10 +271,24 @@ public static List readProcessorConfigs(List new TemplateScript(params) { + @Override + public String execute() { + return propertyValue; + } + }; + } } catch (Exception e) { throw ConfigurationUtils.newConfigurationException(processorType, processorTag, propertyName, e); } diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/core/src/main/java/org/elasticsearch/ingest/IngestDocument.java index 05b92b5772377..2ebb919d51bb3 100644 --- a/core/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/core/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -27,6 +27,8 @@ import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.TemplateScript; import java.time.ZoneOffset; import java.time.ZonedDateTime; @@ -144,7 +146,7 @@ public T getFieldValue(String path, Class clazz, boolean ignoreMissing) { * @throws IllegalArgumentException if the pathTemplate is null, empty, invalid, if the field doesn't exist, * or if the field that is found at the provided path is not of the expected type. */ - public T getFieldValue(TemplateService.Template pathTemplate, Class clazz) { + public T getFieldValue(TemplateScript.Factory pathTemplate, Class clazz) { return getFieldValue(renderTemplate(pathTemplate), clazz); } @@ -191,7 +193,7 @@ public byte[] getFieldValueAsBytes(String path, boolean ignoreMissing) { * @return true if the document contains a value for the field, false otherwise * @throws IllegalArgumentException if the path is null, empty or invalid */ - public boolean hasField(TemplateService.Template fieldPathTemplate) { + public boolean hasField(TemplateScript.Factory fieldPathTemplate) { return hasField(renderTemplate(fieldPathTemplate)); } @@ -280,7 +282,7 @@ public boolean hasField(String path, boolean failOutOfRange) { * @param fieldPathTemplate Resolves to the path with dot-notation within the document * @throws IllegalArgumentException if the path is null, empty, invalid or if the field doesn't exist. 
*/ - public void removeField(TemplateService.Template fieldPathTemplate) { + public void removeField(TemplateScript.Factory fieldPathTemplate) { removeField(renderTemplate(fieldPathTemplate)); } @@ -391,9 +393,9 @@ public void appendFieldValue(String path, Object value) { * @param valueSource The value source that will produce the value or values to append to the existing ones * @throws IllegalArgumentException if the path is null, empty or invalid. */ - public void appendFieldValue(TemplateService.Template fieldPathTemplate, ValueSource valueSource) { + public void appendFieldValue(TemplateScript.Factory fieldPathTemplate, ValueSource valueSource) { Map model = createTemplateModel(); - appendFieldValue(fieldPathTemplate.execute(model), valueSource.copyAndResolve(model)); + appendFieldValue(fieldPathTemplate.newInstance(model).execute(), valueSource.copyAndResolve(model)); } /** @@ -419,9 +421,9 @@ public void setFieldValue(String path, Object value) { * @throws IllegalArgumentException if the path is null, empty, invalid or if the value cannot be set to the * item identified by the provided path. */ - public void setFieldValue(TemplateService.Template fieldPathTemplate, ValueSource valueSource) { + public void setFieldValue(TemplateScript.Factory fieldPathTemplate, ValueSource valueSource) { Map model = createTemplateModel(); - setFieldValue(fieldPathTemplate.execute(model), valueSource.copyAndResolve(model), false); + setFieldValue(fieldPathTemplate.newInstance(model).execute(), valueSource.copyAndResolve(model), false); } private void setFieldValue(String path, Object value, boolean append) { @@ -549,8 +551,8 @@ private static T cast(String path, Object object, Class clazz) { clazz.getName() + "]"); } - public String renderTemplate(TemplateService.Template template) { - return template.execute(createTemplateModel()); + public String renderTemplate(TemplateScript.Factory template) { + return template.newInstance(createTemplateModel()).execute(); } private Map createTemplateModel() { diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestService.java b/core/src/main/java/org/elasticsearch/ingest/IngestService.java index 1455e37588a8c..d869d8921240c 100644 --- a/core/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/core/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -49,9 +49,7 @@ public class IngestService { public IngestService(ClusterSettings clusterSettings, Settings settings, ThreadPool threadPool, Environment env, ScriptService scriptService, AnalysisRegistry analysisRegistry, List ingestPlugins) { - - final TemplateService templateService = new InternalTemplateService(scriptService); - Processor.Parameters parameters = new Processor.Parameters(env, scriptService, templateService, + Processor.Parameters parameters = new Processor.Parameters(env, scriptService, analysisRegistry, threadPool.getThreadContext()); Map processorFactories = new HashMap<>(); for (IngestPlugin ingestPlugin : ingestPlugins) { diff --git a/core/src/main/java/org/elasticsearch/ingest/InternalTemplateService.java b/core/src/main/java/org/elasticsearch/ingest/InternalTemplateService.java deleted file mode 100644 index b066a0698e1ca..0000000000000 --- a/core/src/main/java/org/elasticsearch/ingest/InternalTemplateService.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.ingest; - -import java.util.Collections; -import java.util.Map; - -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.ScriptType; -import org.elasticsearch.script.TemplateScript; - -public class InternalTemplateService implements TemplateService { - - private final ScriptService scriptService; - - InternalTemplateService(ScriptService scriptService) { - this.scriptService = scriptService; - } - - @Override - public Template compile(String template) { - int mustacheStart = template.indexOf("{{"); - int mustacheEnd = template.indexOf("}}"); - if (mustacheStart != -1 && mustacheEnd != -1 && mustacheStart < mustacheEnd) { - Script script = new Script(ScriptType.INLINE, "mustache", template, Collections.emptyMap()); - TemplateScript.Factory compiledTemplate = scriptService.compile(script, TemplateScript.CONTEXT); - return new Template() { - @Override - public String execute(Map model) { - return compiledTemplate.newInstance(model).execute(); - } - - @Override - public String getKey() { - return template; - } - }; - } else { - return new StringTemplate(template); - } - } - - class StringTemplate implements Template { - - private final String value; - - StringTemplate(String value) { - this.value = value; - } - - @Override - public String execute(Map model) { - return value; - } - - @Override - public String getKey() { - return value; - } - } -} diff --git a/core/src/main/java/org/elasticsearch/ingest/Processor.java b/core/src/main/java/org/elasticsearch/ingest/Processor.java index 228ca5f4930e4..39d74fb09a945 100644 --- a/core/src/main/java/org/elasticsearch/ingest/Processor.java +++ b/core/src/main/java/org/elasticsearch/ingest/Processor.java @@ -84,11 +84,6 @@ class Parameters { */ public final ScriptService scriptService; - /** - * Provides template support to pipeline settings. - */ - public final TemplateService templateService; - /** * Provide analyzer support */ @@ -100,11 +95,10 @@ class Parameters { */ public final ThreadContext threadContext; - public Parameters(Environment env, ScriptService scriptService, TemplateService templateService, + public Parameters(Environment env, ScriptService scriptService, AnalysisRegistry analysisRegistry, ThreadContext threadContext) { this.env = env; this.scriptService = scriptService; - this.templateService = templateService; this.threadContext = threadContext; this.analysisRegistry = analysisRegistry; } diff --git a/core/src/main/java/org/elasticsearch/ingest/TemplateService.java b/core/src/main/java/org/elasticsearch/ingest/TemplateService.java deleted file mode 100644 index 2ece5a9430486..0000000000000 --- a/core/src/main/java/org/elasticsearch/ingest/TemplateService.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.ingest; - -import java.util.Map; - -/** - * Abstraction for the ingest template engine used to decouple {@link IngestDocument} from {@link org.elasticsearch.script.ScriptService}. - * Allows to compile a template into an ingest {@link Template} object. - * A compiled template can be executed by calling its {@link Template#execute(Map)} method. - */ -public interface TemplateService { - - Template compile(String template); - - interface Template { - - String execute(Map model); - - String getKey(); - } -} diff --git a/core/src/main/java/org/elasticsearch/ingest/ValueSource.java b/core/src/main/java/org/elasticsearch/ingest/ValueSource.java index fa483a5fbeec3..4e2787c023539 100644 --- a/core/src/main/java/org/elasticsearch/ingest/ValueSource.java +++ b/core/src/main/java/org/elasticsearch/ingest/ValueSource.java @@ -19,13 +19,21 @@ package org.elasticsearch.ingest; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.TemplateScript; + import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import static org.elasticsearch.script.Script.DEFAULT_TEMPLATE_LANG; + /** * Holds a value. If the value is requested a copy is made and optionally template snippets are resolved too. 
*/ @@ -41,13 +49,14 @@ public interface ValueSource { */ Object copyAndResolve(Map<String, Object> model); - static ValueSource wrap(Object value, TemplateService templateService) { + static ValueSource wrap(Object value, ScriptService scriptService) { + if (value instanceof Map) { @SuppressWarnings("unchecked") Map<Object, Object> mapValue = (Map) value; Map<ValueSource, ValueSource> valueTypeMap = new HashMap<>(mapValue.size()); for (Map.Entry<Object, Object> entry : mapValue.entrySet()) { - valueTypeMap.put(wrap(entry.getKey(), templateService), wrap(entry.getValue(), templateService)); + valueTypeMap.put(wrap(entry.getKey(), scriptService), wrap(entry.getValue(), scriptService)); } return new MapValue(valueTypeMap); } else if (value instanceof List) { @@ -55,7 +64,7 @@ static ValueSource wrap(Object value, TemplateService templateService) { List<Object> listValue = (List) value; List<ValueSource> valueSourceList = new ArrayList<>(listValue.size()); for (Object item : listValue) { - valueSourceList.add(wrap(item, templateService)); + valueSourceList.add(wrap(item, scriptService)); } return new ListValue(valueSourceList); } else if (value == null || value instanceof Number || value instanceof Boolean) { @@ -63,7 +72,15 @@ static ValueSource wrap(Object value, TemplateService templateService) { return new ByteValue((byte[]) value); } else if (value instanceof String) { - return new TemplatedValue(templateService.compile((String) value)); + // This check is here because the DEFAULT_TEMPLATE_LANG(mustache) is not + // installed for use by REST tests. `value` will not be + // modified if templating is not available + if (scriptService.isLangSupported(DEFAULT_TEMPLATE_LANG)) { + Script script = new Script(ScriptType.INLINE, DEFAULT_TEMPLATE_LANG, (String) value, Collections.emptyMap()); + return new TemplatedValue(scriptService.compile(script, TemplateScript.CONTEXT)); + } else { + return new ObjectValue(value); + } } else { throw new IllegalArgumentException("unexpected value type [" + value.getClass() + "]"); } @@ -194,15 +211,15 @@ public int hashCode() { final class TemplatedValue implements ValueSource { - private final TemplateService.Template template; + private final TemplateScript.Factory template; - TemplatedValue(TemplateService.Template template) { + TemplatedValue(TemplateScript.Factory template) { this.template = template; } @Override public Object copyAndResolve(Map<String, Object> model) { - return template.execute(model); + return template.newInstance(model).execute(); } @Override @@ -211,12 +228,12 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; TemplatedValue templatedValue = (TemplatedValue) o; - return Objects.equals(template.getKey(), templatedValue.template.getKey()); + return Objects.equals(template, templatedValue.template); } @Override public int hashCode() { - return Objects.hashCode(template.getKey()); + return Objects.hashCode(template); } }
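The common thread in the ingest changes above is the rendering contract: a compiled template is now a TemplateScript.Factory, and rendering binds the model with newInstance(...) before calling execute(), where the old TemplateService.Template took the model directly in execute(model). A sketch of a caller, assuming only the TemplateScript API visible in this diff:

    import java.util.Map;

    import org.elasticsearch.script.TemplateScript;

    final class RenderSketch {
        static String render(TemplateScript.Factory template, Map<String, Object> model) {
            // old contract: template.execute(model)
            return template.newInstance(model).execute();
        }
    }

This is also why TemplatedValue's equals/hashCode switch from comparing template.getKey() to comparing the factories themselves: the factory no longer exposes the original template string.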
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java index c0ea3c6ff8bdd..f6299fcac58ce 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java @@ -98,7 +98,7 @@ public RestResponse buildResponse(GetStoredScriptResponse response, XContentBuil if (lang == null) { builder.startObject(StoredScriptSource.SCRIPT_PARSE_FIELD.getPreferredName()); builder.field(StoredScriptSource.LANG_PARSE_FIELD.getPreferredName(), source.getLang()); - builder.field(StoredScriptSource.CODE_PARSE_FIELD.getPreferredName(), source.getCode()); + builder.field(StoredScriptSource.SOURCE_PARSE_FIELD.getPreferredName(), source.getSource()); if (source.getOptions().isEmpty() == false) { builder.field(StoredScriptSource.OPTIONS_PARSE_FIELD.getPreferredName(), source.getOptions()); @@ -106,7 +106,7 @@ public RestResponse buildResponse(GetStoredScriptResponse response, XContentBuil builder.endObject(); } else { - builder.field(StoredScriptSource.SCRIPT_PARSE_FIELD.getPreferredName(), source.getCode()); + builder.field(StoredScriptSource.SCRIPT_PARSE_FIELD.getPreferredName(), source.getSource()); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index ffb34c6b939cb..51ff743d2d128 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -76,6 +76,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final boolean namesProvided = request.hasParam("name"); final String[] aliases = request.paramAsStringArrayOrEmptyIfAll("name"); final GetAliasesRequest getAliasesRequest = new GetAliasesRequest(aliases); final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); @@ -89,9 +90,13 @@ public RestResponse buildResponse(GetAliasesResponse response, XContentBuilder b final ImmutableOpenMap<String, List<AliasMetaData>> aliasMap = response.getAliases(); final Set<String> aliasNames = new HashSet<>(); - for (final ObjectCursor<List<AliasMetaData>> cursor : aliasMap.values()) { + final Set<String> indicesToDisplay = new HashSet<>(); + for (final ObjectObjectCursor<String, List<AliasMetaData>> cursor : aliasMap) { for (final AliasMetaData aliasMetaData : cursor.value) { aliasNames.add(aliasMetaData.alias()); + if (namesProvided) { + indicesToDisplay.add(cursor.key); + } } } @@ -131,17 +136,19 @@ public RestResponse buildResponse(GetAliasesResponse response, XContentBuilder b } for (final ObjectObjectCursor<String, List<AliasMetaData>> entry : response.getAliases()) { - builder.startObject(entry.key); - { - builder.startObject("aliases"); + if (namesProvided == false || (namesProvided && indicesToDisplay.contains(entry.key))) { + builder.startObject(entry.key); { - for (final AliasMetaData alias : entry.value) { - AliasMetaData.Builder.toXContent(alias, builder, ToXContent.EMPTY_PARAMS); + builder.startObject("aliases"); + { + for (final AliasMetaData alias : entry.value) { + AliasMetaData.Builder.toXContent(alias, builder, ToXContent.EMPTY_PARAMS); + } } + builder.endObject(); } builder.endObject(); } - builder.endObject(); } } builder.endObject(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java index d596ab238cdb5..f379f18fe71d7 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java @@ -89,9 +89,6 @@ public RestResponse buildResponse(GetMappingsResponse response, XContentBuilder builder.startObject(); for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> indexEntry : mappingsByIndex) { - if (indexEntry.value.isEmpty()) { - 
continue; - } builder.startObject(indexEntry.key); builder.startObject(Fields.MAPPINGS); for (ObjectObjectCursor typeEntry : indexEntry.value) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 291f37ea12e67..e08fd0c652877 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -197,6 +197,10 @@ private static void parseSearchSource(final SearchSourceBuilder searchSourceBuil searchSourceBuilder.trackScores(request.paramAsBoolean("track_scores", false)); } + if (request.hasParam("track_total_hits")) { + searchSourceBuilder.trackTotalHits(request.paramAsBoolean("track_total_hits", true)); + } + String sSorts = request.param("sort"); if (sSorts != null) { String[] sorts = Strings.splitStringByCommaToArray(sSorts); diff --git a/core/src/main/java/org/elasticsearch/script/Script.java b/core/src/main/java/org/elasticsearch/script/Script.java index f9b67e31b9954..ee4f8e442f405 100644 --- a/core/src/main/java/org/elasticsearch/script/Script.java +++ b/core/src/main/java/org/elasticsearch/script/Script.java @@ -113,6 +113,11 @@ public final class Script implements ToXContentObject, Writeable { */ public static final ParseField SCRIPT_PARSE_FIELD = new ParseField("script"); + /** + * Standard {@link ParseField} for source on the inner level. + */ + public static final ParseField SOURCE_PARSE_FIELD = new ParseField("source"); + /** * Standard {@link ParseField} for lang on the inner level. */ @@ -218,9 +223,7 @@ private void setParams(Map params) { */ private Script build(String defaultLang) { if (type == null) { - throw new IllegalArgumentException( - "must specify either code for an [" + ScriptType.INLINE.getParseField().getPreferredName() + "] script " + - "or an id for a [" + ScriptType.STORED.getParseField().getPreferredName() + "] script"); + throw new IllegalArgumentException("must specify either [source] for an inline script or [id] for a stored script"); } if (type == ScriptType.INLINE) { @@ -299,7 +302,10 @@ public static Script parse(XContentParser parser) throws IOException { * * {@code * { - * "" : "", + * // Exactly one of "id" or "source" must be specified + * "id" : "", + * // OR + * "source": "", * "lang" : "", * "options" : { * "option0" : "", @@ -317,7 +323,7 @@ public static Script parse(XContentParser parser) throws IOException { * Example: * {@code * { - * "inline" : "return Math.log(doc.popularity) * params.multiplier", + * "source" : "return Math.log(doc.popularity) * params.multiplier", * "lang" : "painless", * "params" : { * "multiplier" : 100.0 @@ -330,7 +336,7 @@ public static Script parse(XContentParser parser) throws IOException { * * {@code * { - * "inline" : { "query" : ... }, + * "source" : { "query" : ... 
}, * "lang" : "", * "options" : { * "option0" : "", @@ -567,7 +573,7 @@ public void writeTo(StreamOutput out) throws IOException { * * {@code * { - * "" : "", + * "<(id, source)>" : "", * "lang" : "", * "options" : { * "option0" : "", @@ -585,7 +591,7 @@ public void writeTo(StreamOutput out) throws IOException { * Example: * {@code * { - * "inline" : "return Math.log(doc.popularity) * params.multiplier;", + * "source" : "return Math.log(doc.popularity) * params.multiplier;", * "lang" : "painless", * "params" : { * "multiplier" : 100.0 @@ -600,7 +606,7 @@ public void writeTo(StreamOutput out) throws IOException { * * {@code * { - * "inline" : { "query" : ... }, + * "source" : { "query" : ... }, * "lang" : "", * "options" : { * "option0" : "", @@ -621,10 +627,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params builderParams) String contentType = options == null ? null : options.get(CONTENT_TYPE_OPTION); - if (type == ScriptType.INLINE && contentType != null && builder.contentType().mediaType().equals(contentType)) { - builder.rawField(type.getParseField().getPreferredName(), new BytesArray(idOrCode)); + if (type == ScriptType.INLINE) { + if (contentType != null && builder.contentType().mediaType().equals(contentType)) { + builder.rawField(SOURCE_PARSE_FIELD.getPreferredName(), new BytesArray(idOrCode)); + } else { + builder.field(SOURCE_PARSE_FIELD.getPreferredName(), idOrCode); + } } else { - builder.field(type.getParseField().getPreferredName(), idOrCode); + builder.field("id", idOrCode); } if (lang != null) { diff --git a/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java index f69302ce0140b..63b5e2e46ab8d 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java @@ -336,7 +336,7 @@ public ScriptMetaData(StreamInput in) throws IOException { throw new IllegalArgumentException("illegal stored script id [" + id + "], does not contain lang"); } else { source = new StoredScriptSource(in); - source = new StoredScriptSource(id.substring(0, split), source.getCode(), Collections.emptyMap()); + source = new StoredScriptSource(id.substring(0, split), source.getSource(), Collections.emptyMap()); } // Version 5.3+ can just be parsed normally using StoredScriptSource. } else { diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 557ad6ca7cb68..a64d13c43ca4c 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -256,7 +256,7 @@ public FactoryType compile(Script script, ScriptContext imp public static final ParseField LANG_PARSE_FIELD = new ParseField("lang"); /** - * Standard {@link ParseField} for code on the inner level. + * Standard {@link ParseField} for source on the inner level. */ - public static final ParseField CODE_PARSE_FIELD = new ParseField("code"); + public static final ParseField SOURCE_PARSE_FIELD = new ParseField("source", "code"); /** * Standard {@link ParseField} for options on the inner level. 
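The net effect of the Script and StoredScriptSource renames is on the wire format: inline scripts serialize their body under "source", and stored scripts serialize their identifier under "id" (StoredScriptSource additionally keeps "code" as a deprecated alias on the way in, per the ParseField above). A rough usage sketch; the exact JSON key order in the comment is illustrative:

    import java.io.IOException;
    import java.util.Collections;

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.script.Script;
    import org.elasticsearch.script.ScriptType;

    final class ScriptXContentSketch {
        static String inlineScriptJson() throws IOException {
            Script script = new Script(ScriptType.INLINE, "painless",
                    "return Math.log(doc.popularity) * params.multiplier",
                    Collections.<String, Object>singletonMap("multiplier", 100.0));
            XContentBuilder builder = XContentFactory.jsonBuilder();
            script.toXContent(builder, ToXContent.EMPTY_PARAMS);
            // with this change the key is "source" where it used to be "inline":
            // {"source":"return Math.log(doc.popularity) * params.multiplier","lang":"painless","params":{"multiplier":100.0}}
            return builder.string();
        }
    }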
@@ -85,7 +85,7 @@ public class StoredScriptSource extends AbstractDiffable imp */ private static final class Builder { private String lang; - private String code; + private String source; private Map options; private Builder() { @@ -99,19 +99,19 @@ private void setLang(String lang) { /** * Since stored scripts can accept templates rather than just scripts, they must also be able - * to handle template parsing, hence the need for custom parsing code. Templates can + * to handle template parsing, hence the need for custom parsing logic. Templates can * consist of either an {@link String} or a JSON object. If a JSON object is discovered * then the content type option must also be saved as a compiler option. */ - private void setCode(XContentParser parser) { + private void setSource(XContentParser parser) { try { if (parser.currentToken() == Token.START_OBJECT) { //this is really for search templates, that need to be converted to json format XContentBuilder builder = XContentFactory.jsonBuilder(); - code = builder.copyCurrentStructure(parser).string(); + source = builder.copyCurrentStructure(parser).string(); options.put(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()); } else { - code = parser.text(); + source = parser.text(); } } catch (IOException exception) { throw new UncheckedIOException(exception); @@ -136,17 +136,17 @@ private StoredScriptSource build() { throw new IllegalArgumentException("lang cannot be empty"); } - if (code == null) { - throw new IllegalArgumentException("must specify code for stored script"); - } else if (code.isEmpty()) { - throw new IllegalArgumentException("code cannot be empty"); + if (source == null) { + throw new IllegalArgumentException("must specify source for stored script"); + } else if (source.isEmpty()) { + throw new IllegalArgumentException("source cannot be empty"); } if (options.size() > 1 || options.size() == 1 && options.get(Script.CONTENT_TYPE_OPTION) == null) { throw new IllegalArgumentException("illegal compiler options [" + options + "] specified"); } - return new StoredScriptSource(lang, code, options); + return new StoredScriptSource(lang, source, options); } } @@ -155,7 +155,7 @@ private StoredScriptSource build() { static { // Defines the fields necessary to parse a Script as XContent using an ObjectParser. PARSER.declareString(Builder::setLang, LANG_PARSE_FIELD); - PARSER.declareField(Builder::setCode, parser -> parser, CODE_PARSE_FIELD, ValueType.OBJECT_OR_STRING); + PARSER.declareField(Builder::setSource, parser -> parser, SOURCE_PARSE_FIELD, ValueType.OBJECT_OR_STRING); PARSER.declareField(Builder::setOptions, XContentParser::mapStrings, OPTIONS_PARSE_FIELD, ValueType.OBJECT); } @@ -174,13 +174,13 @@ private StoredScriptSource build() { * the stored script namespaces. * * The complex script format using the new stored script namespace - * where lang and code are required but options is optional: + * where lang and source are required but options is optional: * * {@code * { * "script" : { * "lang" : "<lang>", - * "code" : "<code>", + * "source" : "<source>", * "options" : { * "option0" : "<option0>", * "option1" : "<option1>", * ... * } * } * } * } @@ -195,7 +195,23 @@ private StoredScriptSource build() { * { * "script": { * "lang" : "painless", - * "code" : "return Math.log(doc.popularity) * params.multiplier" + * "source" : "return Math.log(doc.popularity) * params.multiplier" + * } + * } + * } + * + * For backwards compatibility with the 5.3 to 5.5 format, "code" may also be used in place of "source". 
For example: + * + * {@code + * { + * "script" : { + * "lang" : "<lang>", + * "code" : "<code>", + * "options" : { + * "option0" : "<option0>", + * "option1" : "<option1>", + * ... + * } * } * } * } @@ -219,7 +235,7 @@ private StoredScriptSource build() { * } * * Note that templates can be handled as both strings and complex JSON objects. - * Also templates may be part of the 'code' parameter in a script. The Parser + * Also templates may be part of the 'source' parameter in a script. The Parser * can handle this case as well. * * @param lang An optional parameter to allow for use of the deprecated stored @@ -267,7 +283,7 @@ public static StoredScriptSource parse(String lang, BytesReference content, XCon } } else { - throw new ParsingException(parser.getTokenLocation(), "unexpected token [" + token + "], expected [{, <code>]"); } } else { if (lang == null) { @@ -306,7 +322,7 @@ public static StoredScriptSource parse(String lang, BytesReference content, XCon * { * "script" : { * "lang" : "<lang>", - * "code" : "<code>", + * "source" : "<source>", * "options" : { * "option0" : "<option0>", * "option1" : "<option1>", @@ -316,7 +332,7 @@ public static StoredScriptSource parse(String lang, BytesReference content, XCon * } * } * - * Note that the "code" parameter can also handle template parsing including from + * Note that the "source" parameter can also handle template parsing including from * a complex JSON object. */ public static StoredScriptSource fromXContent(XContentParser parser) throws IOException { @@ -333,66 +349,66 @@ public static Diff readDiffFrom(StreamInput in) throws IOExc } private final String lang; - private final String code; + private final String source; private final Map<String, String> options; /** * Constructor for use with {@link GetStoredScriptResponse} * to support the deprecated stored script namespace. */ - public StoredScriptSource(String code) { + public StoredScriptSource(String source) { this.lang = null; - this.code = Objects.requireNonNull(code); + this.source = Objects.requireNonNull(source); this.options = null; } /** * Standard StoredScriptSource constructor. * @param lang The language to compile the script with. Must not be {@code null}. - * @param code The source code to compile with. Must not be {@code null}. + * @param source The source to compile with. Must not be {@code null}. * @param options Compiler options to be compiled with. Must not be {@code null}, * use an empty {@link Map} to represent no options. */ - public StoredScriptSource(String lang, String code, Map<String, String> options) { + public StoredScriptSource(String lang, String source, Map<String, String> options) { this.lang = Objects.requireNonNull(lang); - this.code = Objects.requireNonNull(code); + this.source = Objects.requireNonNull(source); this.options = Collections.unmodifiableMap(Objects.requireNonNull(options)); } /** * Reads a {@link StoredScriptSource} from a stream. Version 5.3+ will read - * all of the lang, code, and options parameters. For versions prior to 5.3, - * only the code parameter will be read in as a bytes reference. + * all of the lang, source, and options parameters. For versions prior to 5.3, + * only the source parameter will be read in as a bytes reference. 
*/ public StoredScriptSource(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_5_3_0)) { this.lang = in.readString(); - this.code = in.readString(); + this.source = in.readString(); @SuppressWarnings("unchecked") Map<String, String> options = (Map<String, String>)(Map)in.readMap(); this.options = options; } else { this.lang = null; - this.code = in.readBytesReference().utf8ToString(); + this.source = in.readBytesReference().utf8ToString(); this.options = null; } } /** * Writes a {@link StoredScriptSource} to a stream. Version 5.3+ will write - * all of the lang, code, and options parameters. For versions prior to 5.3, - * only the code parameter will be read in as a bytes reference. + * all of the lang, source, and options parameters. For versions prior to 5.3, + * only the source parameter will be written out as a bytes reference. */ @Override public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_5_3_0)) { out.writeString(lang); - out.writeString(code); + out.writeString(source); @SuppressWarnings("unchecked") Map<String, Object> options = (Map<String, Object>)(Map)this.options; out.writeMap(options); } else { - out.writeBytesReference(new BytesArray(code)); + out.writeBytesReference(new BytesArray(source)); } } @@ -403,7 +419,7 @@ public void writeTo(StreamOutput out) throws IOException { * { * "script" : { * "lang" : "<lang>", - * "code" : "<code>", + * "source" : "<source>", * "options" : { * "option0" : "<option0>", * "option1" : "<option1>", @@ -413,13 +429,13 @@ public void writeTo(StreamOutput out) throws IOException { * } * } * - * Note that the 'code' parameter can also handle templates written as complex JSON. + * Note that the 'source' parameter can also handle templates written as complex JSON. */ @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(LANG_PARSE_FIELD.getPreferredName(), lang); - builder.field(CODE_PARSE_FIELD.getPreferredName(), code); + builder.field(SOURCE_PARSE_FIELD.getPreferredName(), source); builder.field(OPTIONS_PARSE_FIELD.getPreferredName(), options); builder.endObject(); @@ -434,10 +450,10 @@ public String getLang() { } /** - * @return The code used for compiling this script. + * @return The source used for compiling this script. */ - public String getCode() { - return code; + public String getSource() { + return source; } /** @@ -455,7 +471,7 @@ public boolean equals(Object o) { StoredScriptSource that = (StoredScriptSource)o; if (lang != null ? !lang.equals(that.lang) : that.lang != null) return false; - if (code != null ? !code.equals(that.code) : that.code != null) return false; + if (source != null ? !source.equals(that.source) : that.source != null) return false; return options != null ? options.equals(that.options) : that.options == null; } @@ -463,7 +479,7 @@ public boolean equals(Object o) { @Override public int hashCode() { int result = lang != null ? lang.hashCode() : 0; - result = 31 * result + (code != null ? code.hashCode() : 0); + result = 31 * result + (source != null ? source.hashCode() : 0); result = 31 * result + (options != null ? 
options.hashCode() : 0); return result; } @@ -472,7 +488,7 @@ public int hashCode() { public String toString() { return "StoredScriptSource{" + "lang='" + lang + '\'' + - ", code='" + code + '\'' + + ", source='" + source + '\'' + ", options=" + options + '}'; } diff --git a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 45b8967545414..8e0536adfb47a 100644 --- a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -114,6 +114,7 @@ final class DefaultSearchContext extends SearchContext { private SortAndFormats sort; private Float minimumScore; private boolean trackScores = false; // when sorting, track scores as well... + private boolean trackTotalHits = true; private FieldDoc searchAfter; private CollapseContext collapse; private boolean lowLevelCancellation; @@ -548,6 +549,17 @@ public boolean trackScores() { return this.trackScores; } + @Override + public SearchContext trackTotalHits(boolean trackTotalHits) { + this.trackTotalHits = trackTotalHits; + return this; + } + + @Override + public boolean trackTotalHits() { + return trackTotalHits; + } + @Override public SearchContext searchAfter(FieldDoc searchAfter) { this.searchAfter = searchAfter; diff --git a/core/src/main/java/org/elasticsearch/search/SearchHits.java b/core/src/main/java/org/elasticsearch/search/SearchHits.java index f7250a7f07a0b..650ac7ea8046b 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchHits.java +++ b/core/src/main/java/org/elasticsearch/search/SearchHits.java @@ -19,6 +19,7 @@ package org.elasticsearch.search; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -178,7 +179,17 @@ public static SearchHits readSearchHits(StreamInput in) throws IOException { @Override public void readFrom(StreamInput in) throws IOException { - totalHits = in.readVLong(); + final boolean hasTotalHits; + if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha3)) { + hasTotalHits = in.readBoolean(); + } else { + hasTotalHits = true; + } + if (hasTotalHits) { + totalHits = in.readVLong(); + } else { + totalHits = -1; + } maxScore = in.readFloat(); int size = in.readVInt(); if (size == 0) { @@ -193,7 +204,17 @@ public void readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(totalHits); + final boolean hasTotalHits; + if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha3)) { + hasTotalHits = totalHits >= 0; + out.writeBoolean(hasTotalHits); + } else { + assert totalHits >= 0; + hasTotalHits = true; + } + if (hasTotalHits) { + out.writeVLong(totalHits); + } out.writeFloat(maxScore); out.writeVInt(hits.length); if (hits.length > 0) { diff --git a/core/src/main/java/org/elasticsearch/search/SearchModule.java b/core/src/main/java/org/elasticsearch/search/SearchModule.java index 16bd9dbe8b931..87d937c09954c 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/core/src/main/java/org/elasticsearch/search/SearchModule.java @@ -229,7 +229,6 @@ import org.elasticsearch.search.fetch.subphase.highlight.HighlightPhase; import org.elasticsearch.search.fetch.subphase.highlight.Highlighter; import org.elasticsearch.search.fetch.subphase.highlight.PlainHighlighter; -import 
org.elasticsearch.search.fetch.subphase.highlight.PostingsHighlighter; import org.elasticsearch.search.fetch.subphase.highlight.UnifiedHighlighter; import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.rescore.RescoreBuilder; @@ -574,7 +573,6 @@ private Map setupHighlighters(Settings settings, List highlighters = new NamedRegistry<>("highlighter"); highlighters.register("fvh", new FastVectorHighlighter(settings)); highlighters.register("plain", new PlainHighlighter()); - highlighters.register("postings", new PostingsHighlighter()); highlighters.register("unified", new UnifiedHighlighter()); highlighters.extractAndRegister(plugins, SearchPlugin::getHighlighters); diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index f960d8eb0bae4..7e691e441b5ac 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -635,6 +635,10 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc } } context.trackScores(source.trackScores()); + if (source.trackTotalHits() == false && context.scrollContext() != null) { + throw new SearchContextException(context, "disabling [track_total_hits] is not allowed in a scroll context"); + } + context.trackTotalHits(source.trackTotalHits()); if (source.minScore() != null) { context.minimumScore(source.minScore()); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java index 8f8ed143ca3a6..6732db9cecf86 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java @@ -172,7 +172,7 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc // no need to provide deleted docs to the filter final Bits[] bits = new Bits[filters.length + totalNumIntersections]; for (int i = 0; i < filters.length; ++i) { - bits[i] = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorer(ctx)); + bits[i] = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorerSupplier(ctx)); } // Add extra Bits for intersections int pos = filters.length; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java index 5313bdade8028..46a9049711f85 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java @@ -56,7 +56,7 @@ public FilterAggregator(String name, public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { // no need to provide deleted docs to the filter - final Bits bits = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filter.scorer(ctx)); + final Bits bits = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filter.scorerSupplier(ctx)); return new LeafBucketCollectorBase(sub, null) { @Override public void collect(int doc, long bucket) throws IOException { diff --git 
a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java index a225d5f92f0b7..8e65ca4fbe300 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java @@ -144,7 +144,7 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, // no need to provide deleted docs to the filter final Bits[] bits = new Bits[filters.length]; for (int i = 0; i < filters.length; ++i) { - bits[i] = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorer(ctx)); + bits[i] = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorerSupplier(ctx)); } return new LeafBucketCollectorBase(sub, null) { @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java index 0592ccf7cfc1d..7be81c08120c9 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java @@ -33,6 +33,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.Map; @@ -114,7 +115,7 @@ public static ExecutionMode fromString(String value) { return mode; } } - throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + values()); + throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + Arrays.toString(values())); } private final ParseField parseField; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index ba5191b122946..65819736d5419 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -53,6 +53,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.Map; @@ -327,7 +328,7 @@ public static ExecutionMode fromString(String value) { return mode; } } - throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + values()); + throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + Arrays.toString(values())); } private final ParseField parseField; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 627e1808e4200..086bbd9f02239 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -41,6 +41,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import 
java.util.Arrays; import java.util.List; import java.util.Map; @@ -359,7 +360,7 @@ public static ExecutionMode fromString(String value) { return mode; } } - throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + values()); + throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + Arrays.toString(values())); } private final ParseField parseField; diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 684350076b876..be39ce3698cad 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -92,6 +92,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ public static final ParseField IGNORE_FAILURE_FIELD = new ParseField("ignore_failure"); public static final ParseField SORT_FIELD = new ParseField("sort"); public static final ParseField TRACK_SCORES_FIELD = new ParseField("track_scores"); + public static final ParseField TRACK_TOTAL_HITS_FIELD = new ParseField("track_total_hits"); public static final ParseField INDICES_BOOST_FIELD = new ParseField("indices_boost"); public static final ParseField AGGREGATIONS_FIELD = new ParseField("aggregations"); public static final ParseField AGGS_FIELD = new ParseField("aggs"); @@ -142,6 +143,8 @@ public static HighlightBuilder highlight() { private boolean trackScores = false; + private boolean trackTotalHits = true; + private SearchAfterBuilder searchAfterBuilder; private SliceBuilder sliceBuilder; @@ -224,6 +227,11 @@ public SearchSourceBuilder(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_5_3_0)) { collapse = in.readOptionalWriteable(CollapseBuilder::new); } + if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha3)) { + trackTotalHits = in.readBoolean(); + } else { + trackTotalHits = true; + } } @Override @@ -275,6 +283,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_5_3_0)) { out.writeOptionalWriteable(collapse); } + if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha3)) { + out.writeBoolean(trackTotalHits); + } } /** @@ -489,6 +500,17 @@ public boolean trackScores() { return trackScores; } + /** + * Indicates if the total hit count for the query should be tracked. + */ + public boolean trackTotalHits() { + return trackTotalHits; + } + + public SearchSourceBuilder trackTotalHits(boolean trackTotalHits) { + this.trackTotalHits = trackTotalHits; + return this; + } /** * The sort values that indicates which docs this request should "search after". 
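(Illustrative sketch, not part of the patch: the trackTotalHits setter added above lets a caller that only needs the top documents opt out of total hit counting. SearchRequest and QueryBuilders are the standard Elasticsearch request API; the index name and query are assumed.)

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;

// Build a search that skips total hit counting; with this patch the
// shard-level SearchHits is then serialized with totalHits = -1.
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder()
        .query(QueryBuilders.termQuery("user", "kimchy"))
        .size(10)
        .trackTotalHits(false);
SearchRequest searchRequest = new SearchRequest("my_index").source(sourceBuilder);

Note that, per the SearchService change above, a scroll request that disables track_total_hits is rejected with a SearchContextException.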
@@ -926,6 +948,7 @@ private SearchSourceBuilder shallowCopy(QueryBuilder queryBuilder, QueryBuilder rewrittenBuilder.terminateAfter = terminateAfter; rewrittenBuilder.timeout = timeout; rewrittenBuilder.trackScores = trackScores; + rewrittenBuilder.trackTotalHits = trackTotalHits; rewrittenBuilder.version = version; rewrittenBuilder.collapse = collapse; return rewrittenBuilder; @@ -964,6 +987,8 @@ public void parseXContent(QueryParseContext context) throws IOException { explain = parser.booleanValue(); } else if (TRACK_SCORES_FIELD.match(currentFieldName)) { trackScores = parser.booleanValue(); + } else if (TRACK_TOTAL_HITS_FIELD.match(currentFieldName)) { + trackTotalHits = parser.booleanValue(); } else if (_SOURCE_FIELD.match(currentFieldName)) { fetchSourceContext = FetchSourceContext.fromXContent(context.parser()); } else if (STORED_FIELDS_FIELD.match(currentFieldName)) { @@ -1174,6 +1199,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(TRACK_SCORES_FIELD.getPreferredName(), true); } + if (trackTotalHits == false) { + builder.field(TRACK_TOTAL_HITS_FIELD.getPreferredName(), false); + } + if (searchAfterBuilder != null) { builder.array(SEARCH_AFTER.getPreferredName(), searchAfterBuilder.getSortValues()); } @@ -1433,7 +1462,7 @@ public int hashCode() { return Objects.hash(aggregations, explain, fetchSourceContext, docValueFields, storedFieldsContext, from, highlightBuilder, indexBoosts, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields, size, sorts, searchAfterBuilder, sliceBuilder, stats, suggestBuilder, terminateAfter, timeout, trackScores, version, - profile, extBuilders, collapse); + profile, extBuilders, collapse, trackTotalHits); } @Override @@ -1470,6 +1499,7 @@ public boolean equals(Object obj) { && Objects.equals(version, other.version) && Objects.equals(profile, other.profile) && Objects.equals(extBuilders, other.extBuilders) - && Objects.equals(collapse, other.collapse); + && Objects.equals(collapse, other.collapse) + && Objects.equals(trackTotalHits, other.trackTotalHits); } } diff --git a/core/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java b/core/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java index cb1587cd7d9e9..9e329da9b0075 100644 --- a/core/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java +++ b/core/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java @@ -56,7 +56,7 @@ public List getInnerHit() { return innerHits; } - public CollapsingTopDocsCollector createTopDocs(Sort sort, int topN, boolean trackMaxScore) throws IOException { + public CollapsingTopDocsCollector createTopDocs(Sort sort, int topN, boolean trackMaxScore) { if (fieldType instanceof KeywordFieldMapper.KeywordFieldType) { return CollapsingTopDocsCollector.createKeyword(fieldType.name(), sort, topN, trackMaxScore); } else if (fieldType instanceof NumberFieldMapper.NumberFieldType) { diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java index e38090ee4d858..c28e07ff45526 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java @@ -22,7 +22,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import 
org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.elasticsearch.ExceptionsHelper; @@ -78,8 +78,8 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) { LeafReaderContext ctx = indexReader.leaves().get(readerIndex); docBase = ctx.docBase; // scorers can be costly to create, so reuse them across docs of the same segment - Scorer scorer = weight.scorer(ctx); - matchingDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), scorer); + ScorerSupplier scorerSupplier = weight.scorerSupplier(ctx); + matchingDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), scorerSupplier); } if (matchingDocs.get(hit.docId() - docBase)) { matchedQueries[i].add(name); diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java index e5db6639ad82a..e6e50cc37e6fe 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java @@ -262,8 +262,8 @@ public Integer numOfFragments() { /** * Set type of highlighter to use. Out of the box supported types - * are plain, fvh and postings. - * The default option selected is dependent on the mappings defined for your index. + * are unified, plain and fvh. + * Defaults to unified. * Details of the different highlighter types are covered in the reference guide. */ @SuppressWarnings("unchecked") diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java index b1d557e851a6c..c08eea2e58820 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java @@ -50,7 +50,6 @@ import java.util.Map; public class FastVectorHighlighter implements Highlighter { - private static final BoundaryScanner DEFAULT_SIMPLE_BOUNDARY_SCANNER = new SimpleBoundaryScanner(); private static final BoundaryScanner DEFAULT_SENTENCE_BOUNDARY_SCANNER = new BreakIteratorBoundaryScanner(BreakIterator.getSentenceInstance(Locale.ROOT)); diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java index 701b981e0f053..6b9121b8f7b71 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java @@ -39,8 +39,6 @@ import java.util.Map; public class HighlightPhase extends AbstractComponent implements FetchSubPhase { - private static final List STANDARD_HIGHLIGHTERS_BY_PRECEDENCE = Arrays.asList("fvh", "postings", "plain"); - private final Map highlighters; public HighlightPhase(Settings settings, Map highlighters) { @@ -94,13 +92,7 @@ public void hitExecute(SearchContext context, HitContext hitContext) { } String highlighterType = field.fieldOptions().highlighterType(); if (highlighterType == null) { - for(String highlighterCandidate : STANDARD_HIGHLIGHTERS_BY_PRECEDENCE) { - if 
(highlighters.get(highlighterCandidate).canHighlight(fieldMapper)) { - highlighterType = highlighterCandidate; - break; - } - } - assert highlighterType != null; + highlighterType = "unified"; } Highlighter highlighter = highlighters.get(highlighterType); if (highlighter == null) { diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightUtils.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightUtils.java index 4a6e991b9a356..b241a686a248f 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightUtils.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightUtils.java @@ -35,7 +35,7 @@ public final class HighlightUtils { - //U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting (postings highlighter) + //U+2029 PARAGRAPH SEPARATOR (PS): each value holds a discrete passage for highlighting (unified highlighter) public static final char PARAGRAPH_SEPARATOR = 8233; public static final char NULL_SEPARATOR = '\u0000'; diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java index deb1464b703fc..c7943367d31b2 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java @@ -49,7 +49,6 @@ import java.util.Map; public class PlainHighlighter implements Highlighter { - private static final String CACHE_KEY = "highlight-plain"; @Override diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PostingsHighlighter.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PostingsHighlighter.java deleted file mode 100644 index 34997912febd0..0000000000000 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PostingsHighlighter.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.search.fetch.subphase.highlight; - -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.highlight.Encoder; -import org.apache.lucene.search.postingshighlight.CustomPassageFormatter; -import org.apache.lucene.search.postingshighlight.CustomPostingsHighlighter; -import org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator; -import org.apache.lucene.search.highlight.Snippet; -import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.text.Text; -import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.search.fetch.FetchPhaseExecutionException; -import org.elasticsearch.search.fetch.FetchSubPhase; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.fetch.subphase.highlight.HighlightUtils.Encoders; - -import java.io.IOException; -import java.text.BreakIterator; -import java.util.ArrayList; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; - -public class PostingsHighlighter implements Highlighter { - - private static final String CACHE_KEY = "highlight-postings"; - - @Override - public HighlightField highlight(HighlighterContext highlighterContext) { - - FieldMapper fieldMapper = highlighterContext.mapper; - SearchContextHighlight.Field field = highlighterContext.field; - if (canHighlight(fieldMapper) == false) { - throw new IllegalArgumentException("the field [" + highlighterContext.fieldName - + "] should be indexed with positions and offsets in the postings list to be used with postings highlighter"); - } - - SearchContext context = highlighterContext.context; - FetchSubPhase.HitContext hitContext = highlighterContext.hitContext; - - if (!hitContext.cache().containsKey(CACHE_KEY)) { - hitContext.cache().put(CACHE_KEY, new HighlighterEntry()); - } - - HighlighterEntry highlighterEntry = (HighlighterEntry) hitContext.cache().get(CACHE_KEY); - MapperHighlighterEntry mapperHighlighterEntry = highlighterEntry.mappers.get(fieldMapper); - - if (mapperHighlighterEntry == null) { - Encoder encoder = field.fieldOptions().encoder().equals("html") ? 
Encoders.HTML : Encoders.DEFAULT; - CustomPassageFormatter passageFormatter = new CustomPassageFormatter( - field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0], encoder); - mapperHighlighterEntry = new MapperHighlighterEntry(passageFormatter); - } - - List snippets = new ArrayList<>(); - int numberOfFragments; - try { - Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer(); - List fieldValues = HighlightUtils.loadFieldValues(field, fieldMapper, context, hitContext); - CustomPostingsHighlighter highlighter; - if (field.fieldOptions().numberOfFragments() == 0) { - //we use a control char to separate values, which is the only char that the custom break iterator breaks the text on, - //so we don't lose the distinction between the different values of a field and we get back a snippet per value - String fieldValue = mergeFieldValues(fieldValues, HighlightUtils.NULL_SEPARATOR); - CustomSeparatorBreakIterator breakIterator = new CustomSeparatorBreakIterator(HighlightUtils.NULL_SEPARATOR); - highlighter = new CustomPostingsHighlighter(analyzer, mapperHighlighterEntry.passageFormatter, breakIterator, - fieldValue, field.fieldOptions().noMatchSize() > 0); - numberOfFragments = fieldValues.size(); //we are highlighting the whole content, one snippet per value - } else { - //using paragraph separator we make sure that each field value holds a discrete passage for highlighting - String fieldValue = mergeFieldValues(fieldValues, HighlightUtils.PARAGRAPH_SEPARATOR); - highlighter = new CustomPostingsHighlighter(analyzer, mapperHighlighterEntry.passageFormatter, - fieldValue, field.fieldOptions().noMatchSize() > 0); - numberOfFragments = field.fieldOptions().numberOfFragments(); - } - - IndexSearcher searcher = new IndexSearcher(hitContext.reader()); - Snippet[] fieldSnippets = highlighter.highlightField(fieldMapper.fieldType().name(), highlighterContext.query, searcher, - hitContext.docId(), numberOfFragments); - for (Snippet fieldSnippet : fieldSnippets) { - if (Strings.hasText(fieldSnippet.getText())) { - snippets.add(fieldSnippet); - } - } - - } catch(IOException e) { - throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e); - } - - snippets = filterSnippets(snippets, field.fieldOptions().numberOfFragments()); - - if (field.fieldOptions().scoreOrdered()) { - //let's sort the snippets by score if needed - CollectionUtil.introSort(snippets, new Comparator() { - @Override - public int compare(Snippet o1, Snippet o2) { - return (int) Math.signum(o2.getScore() - o1.getScore()); - } - }); - } - - String[] fragments = new String[snippets.size()]; - for (int i = 0; i < fragments.length; i++) { - fragments[i] = snippets.get(i).getText(); - } - - if (fragments.length > 0) { - return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments)); - } - - return null; - } - - @Override - public boolean canHighlight(FieldMapper fieldMapper) { - return fieldMapper.fieldType().indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS; - } - - static String mergeFieldValues(List fieldValues, char valuesSeparator) { - //postings highlighter accepts all values in a single string, as offsets etc. 
need to match with content - //loaded from stored fields, we merge all values using a proper separator - String rawValue = Strings.collectionToDelimitedString(fieldValues, String.valueOf(valuesSeparator)); - return rawValue.substring(0, Math.min(rawValue.length(), Integer.MAX_VALUE - 1)); - } - - static List filterSnippets(List snippets, int numberOfFragments) { - - //We need to filter the snippets as due to no_match_size we could have - //either highlighted snippets or non highlighted ones and we don't want to mix those up - List filteredSnippets = new ArrayList<>(snippets.size()); - for (Snippet snippet : snippets) { - if (snippet.isHighlighted()) { - filteredSnippets.add(snippet); - } - } - - //if there's at least one highlighted snippet, we return all the highlighted ones - //otherwise we return the first non highlighted one if available - if (filteredSnippets.size() == 0) { - if (snippets.size() > 0) { - Snippet snippet = snippets.get(0); - //if we tried highlighting the whole content using whole break iterator (as number_of_fragments was 0) - //we need to return the first sentence of the content rather than the whole content - if (numberOfFragments == 0) { - BreakIterator bi = BreakIterator.getSentenceInstance(Locale.ROOT); - String text = snippet.getText(); - bi.setText(text); - int next = bi.next(); - if (next != BreakIterator.DONE) { - String newText = text.substring(0, next).trim(); - snippet = new Snippet(newText, snippet.getScore(), snippet.isHighlighted()); - } - } - filteredSnippets.add(snippet); - } - } - - return filteredSnippets; - } - - static class HighlighterEntry { - Map mappers = new HashMap<>(); - } - - static class MapperHighlighterEntry { - final CustomPassageFormatter passageFormatter; - - private MapperHighlighterEntry(CustomPassageFormatter passageFormatter) { - this.passageFormatter = passageFormatter; - } - } -} diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java index d3a94d0411b40..684c7ddbddd87 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java @@ -21,7 +21,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.highlight.Encoder; -import org.apache.lucene.search.highlight.Snippet; +import org.apache.lucene.search.uhighlight.Snippet; import org.apache.lucene.search.uhighlight.BoundedBreakIteratorScanner; import org.apache.lucene.search.uhighlight.CustomPassageFormatter; import org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter; @@ -44,8 +44,6 @@ import java.util.stream.Collectors; import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR; -import static org.elasticsearch.search.fetch.subphase.highlight.PostingsHighlighter.filterSnippets; -import static org.elasticsearch.search.fetch.subphase.highlight.PostingsHighlighter.mergeFieldValues; public class UnifiedHighlighter implements Highlighter { private static final String CACHE_KEY = "highlight-unified"; @@ -174,6 +172,49 @@ private BreakIterator getBreakIterator(SearchContextHighlight.Field field) { } } + private static List filterSnippets(List snippets, int numberOfFragments) { + + //We need to filter the snippets as due to no_match_size we could have + //either highlighted snippets or non 
highlighted ones and we don't want to mix those up + List filteredSnippets = new ArrayList<>(snippets.size()); + for (Snippet snippet : snippets) { + if (snippet.isHighlighted()) { + filteredSnippets.add(snippet); + } + } + + //if there's at least one highlighted snippet, we return all the highlighted ones + //otherwise we return the first non highlighted one if available + if (filteredSnippets.size() == 0) { + if (snippets.size() > 0) { + Snippet snippet = snippets.get(0); + //if we tried highlighting the whole content using whole break iterator (as number_of_fragments was 0) + //we need to return the first sentence of the content rather than the whole content + if (numberOfFragments == 0) { + BreakIterator bi = BreakIterator.getSentenceInstance(Locale.ROOT); + String text = snippet.getText(); + bi.setText(text); + int next = bi.next(); + if (next != BreakIterator.DONE) { + String newText = text.substring(0, next).trim(); + snippet = new Snippet(newText, snippet.getScore(), snippet.isHighlighted()); + } + } + filteredSnippets.add(snippet); + } + } + + return filteredSnippets; + } + + private static String mergeFieldValues(List fieldValues, char valuesSeparator) { + //postings highlighter accepts all values in a single string, as offsets etc. need to match with content + //loaded from stored fields, we merge all values using a proper separator + String rawValue = Strings.collectionToDelimitedString(fieldValues, String.valueOf(valuesSeparator)); + return rawValue.substring(0, Math.min(rawValue.length(), Integer.MAX_VALUE - 1)); + } + + private static class HighlighterEntry { Map mappers = new HashMap<>(); } diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index fadf979d911d2..5e5108c3225d6 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -321,6 +321,16 @@ public boolean trackScores() { return in.trackScores(); } + @Override + public SearchContext trackTotalHits(boolean trackTotalHits) { + return in.trackTotalHits(trackTotalHits); + } + + @Override + public boolean trackTotalHits() { + return in.trackTotalHits(); + } + @Override public SearchContext searchAfter(FieldDoc searchAfter) { return in.searchAfter(searchAfter); diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java index ebb2157d981e7..d3c281ac23b08 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -240,6 +240,13 @@ public InnerHitsContext innerHits() { public abstract boolean trackScores(); + public abstract SearchContext trackTotalHits(boolean trackTotalHits); + + /** + * Indicates if the total hit count for the query should be tracked. 
Defaults to true + */ + public abstract boolean trackTotalHits(); + public abstract SearchContext searchAfter(FieldDoc searchAfter); public abstract FieldDoc searchAfter(); diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java index 3b1bfe3c27a69..16a2f8c8ebf4c 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java @@ -37,7 +37,6 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; /** * This class is the internal representation of a profiled Query, corresponding @@ -50,12 +49,12 @@ */ public final class ProfileResult implements Writeable, ToXContentObject { - private static final ParseField TYPE = new ParseField("type"); - private static final ParseField DESCRIPTION = new ParseField("description"); - private static final ParseField NODE_TIME = new ParseField("time"); - private static final ParseField NODE_TIME_RAW = new ParseField("time_in_nanos"); - private static final ParseField CHILDREN = new ParseField("children"); - private static final ParseField BREAKDOWN = new ParseField("breakdown"); + static final ParseField TYPE = new ParseField("type"); + static final ParseField DESCRIPTION = new ParseField("description"); + static final ParseField NODE_TIME = new ParseField("time"); + static final ParseField NODE_TIME_RAW = new ParseField("time_in_nanos"); + static final ParseField CHILDREN = new ParseField("children"); + static final ParseField BREAKDOWN = new ParseField("breakdown"); private final String type; private final String description; @@ -188,7 +187,7 @@ public static ProfileResult fromXContent(XContentParser parser) throws IOExcepti // skip, total time is calculate by adding up 'timings' values in ProfileResult ctor parser.longValue(); } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } else if (token == XContentParser.Token.START_OBJECT) { if (BREAKDOWN.match(currentFieldName)) { @@ -200,7 +199,7 @@ public static ProfileResult fromXContent(XContentParser parser) throws IOExcepti timings.put(name, value); } } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } else if (token == XContentParser.Token.START_ARRAY) { if (CHILDREN.match(currentFieldName)) { @@ -208,7 +207,7 @@ public static ProfileResult fromXContent(XContentParser parser) throws IOExcepti children.add(ProfileResult.fromXContent(parser)); } } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } } diff --git a/core/src/main/java/org/elasticsearch/search/profile/SearchProfileShardResults.java b/core/src/main/java/org/elasticsearch/search/profile/SearchProfileShardResults.java index eb3017bd1e71f..b7fa39c42f3ab 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/SearchProfileShardResults.java +++ b/core/src/main/java/org/elasticsearch/search/profile/SearchProfileShardResults.java @@ -39,9 +39,6 @@ import java.util.TreeSet; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureFieldName; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; -import 
static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken; /** * A container class to hold all the profile results across all shards. Internally @@ -111,12 +108,19 @@ public static SearchProfileShardResults fromXContent(XContentParser parser) thro XContentParser.Token token = parser.currentToken(); ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation); Map searchProfileResults = new HashMap<>(); - ensureFieldName(parser, parser.nextToken(), SHARDS_FIELD); - ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation); - while((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - parseSearchProfileResultsEntry(parser, searchProfileResults); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.START_ARRAY) { + if (SHARDS_FIELD.equals(parser.currentName())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + parseSearchProfileResultsEntry(parser, searchProfileResults); + } + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_OBJECT) { + parser.skipChildren(); + } } - ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser::getTokenLocation); return new SearchProfileShardResults(searchProfileResults); } @@ -135,7 +139,7 @@ private static void parseSearchProfileResultsEntry(XContentParser parser, if (ID_FIELD.equals(currentFieldName)) { id = parser.text(); } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } else if (token == XContentParser.Token.START_ARRAY) { if (SEARCHES_FIELD.equals(currentFieldName)) { @@ -145,10 +149,10 @@ private static void parseSearchProfileResultsEntry(XContentParser parser, } else if (AggregationProfileShardResult.AGGREGATIONS.equals(currentFieldName)) { aggProfileShardResult = AggregationProfileShardResult.fromXContent(parser); } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } else { - throwUnknownToken(token, parser.getTokenLocation()); + parser.skipChildren(); } } searchProfileResults.put(id, new ProfileShardResult(queryProfileResults, aggProfileShardResult)); diff --git a/core/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java b/core/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java index 1fa56bde7fe95..0d4ae0384baae 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java +++ b/core/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java @@ -34,8 +34,6 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken; /** * Public interface and serialization container for profiled timings of the @@ -181,7 +179,7 @@ public static CollectorResult fromXContent(XContentParser parser) throws IOExcep } else if (TIME_NANOS.match(currentFieldName)) { time = parser.longValue(); } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } else if (token == XContentParser.Token.START_ARRAY) { if (CHILDREN.match(currentFieldName)) { @@ -189,10 +187,10 @@ public static CollectorResult fromXContent(XContentParser parser) throws IOExcep 
children.add(CollectorResult.fromXContent(parser)); } } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } else { - throwUnknownToken(token, parser.getTokenLocation()); + parser.skipChildren(); } } return new CollectorResult(name, reason, time, children); diff --git a/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java b/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java index 362826e860c61..81062ed75c6d6 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java +++ b/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java @@ -33,8 +33,6 @@ import java.util.List; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken; /** * A container class to hold the profile results for a single shard in the request. @@ -127,7 +125,7 @@ public static QueryProfileShardResult fromXContent(XContentParser parser) throws if (REWRITE_TIME.equals(currentFieldName)) { rewriteTime = parser.longValue(); } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } else if (token == XContentParser.Token.START_ARRAY) { if (QUERY_ARRAY.equals(currentFieldName)) { @@ -139,10 +137,10 @@ public static QueryProfileShardResult fromXContent(XContentParser parser) throws collector = CollectorResult.fromXContent(parser); } } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); } } else { - throwUnknownToken(token, parser.getTokenLocation()); + parser.skipChildren(); } } return new QueryProfileShardResult(queryProfileResults, rewriteTime, collector); diff --git a/core/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java b/core/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java new file mode 100644 index 0000000000000..2429c1c68e6ec --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.query; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.FilterCollector; +import org.apache.lucene.search.FilterLeafCollector; +import org.apache.lucene.search.LeafCollector; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * A {@link Collector} that early terminates collection after maxCountHits docs have been collected. + */ +public class EarlyTerminatingCollector extends FilterCollector { + private final int maxCountHits; + private int numCollected; + private boolean terminatedEarly = false; + + EarlyTerminatingCollector(final Collector delegate, int maxCountHits) { + super(delegate); + this.maxCountHits = maxCountHits; + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + if (numCollected >= maxCountHits) { + throw new CollectionTerminatedException(); + } + return new FilterLeafCollector(super.getLeafCollector(context)) { + @Override + public void collect(int doc) throws IOException { + super.collect(doc); + if (++numCollected >= maxCountHits) { + terminatedEarly = true; + throw new CollectionTerminatedException(); + } + }; + }; + } + + public boolean terminatedEarly() { + return terminatedEarly; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java b/core/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java new file mode 100644 index 0000000000000..eaaf07ce305b6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java @@ -0,0 +1,275 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.query; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.EarlyTerminatingSortingCollector; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MultiCollector; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.TimeLimitingCollector; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.Counter; +import org.elasticsearch.common.inject.Provider; +import org.elasticsearch.common.lucene.MinimumScoreCollector; +import org.elasticsearch.common.lucene.search.FilteredCollector; +import org.elasticsearch.search.profile.query.InternalProfileCollector; +import org.elasticsearch.tasks.TaskCancelledException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.function.BooleanSupplier; +import java.util.function.IntSupplier; + +import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_CANCELLED; +import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_MIN_SCORE; +import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_MULTI; +import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_POST_FILTER; +import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_TERMINATE_AFTER_COUNT; +import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_TIMEOUT; +import static org.elasticsearch.search.query.TopDocsCollectorContext.shortcutTotalHitCount; + +abstract class QueryCollectorContext { + private String profilerName; + + QueryCollectorContext(String profilerName) { + this.profilerName = profilerName; + } + + /** + * Creates a collector that delegates documents to the provided in collector. + * @param in The delegate collector + */ + abstract Collector create(Collector in) throws IOException; + + /** + * Wraps this collector with a profiler + */ + protected InternalProfileCollector createWithProfiler(InternalProfileCollector in) throws IOException { + final Collector collector = create(in); + return new InternalProfileCollector(collector, profilerName, in != null ? Collections.singletonList(in) : Collections.emptyList()); + } + + /** + * A value of false indicates that the underlying collector can infer + * its results directly from the context (search is not needed). + * Defaults to true (search is needed). + */ + boolean shouldCollect() { + return true; + } + + /** + * Post-process result after search execution. 
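+     * <p>For example, the early-termination context created later in this class
+     * overrides it to flag results whose collection stopped early; a sketch of
+     * that pattern, matching createEarlyTerminationCollectorContext below:
+     * <pre>{@code
+     * void postProcess(QuerySearchResult result, boolean hasCollected) throws IOException {
+     *     if (hasCollected && collector.terminatedEarly()) {
+     *         result.terminatedEarly(true);
+     *     }
+     * }
+     * }</pre>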
+ * + * @param result The query search result to populate + * @param hasCollected True if search was executed + */ + void postProcess(QuerySearchResult result, boolean hasCollected) throws IOException {} + + /** + * Creates the collector tree from the provided collectors + * @param collectors Ordered list of collector contexts + */ + static Collector createQueryCollector(List collectors) throws IOException { + Collector collector = null; + for (QueryCollectorContext ctx : collectors) { + collector = ctx.create(collector); + } + return collector; + } + + /** + * Creates the collector tree from the provided collectors and wraps each collector with a profiler + * @param collectors Ordered list of collector contexts + */ + static InternalProfileCollector createQueryCollectorWithProfiler(List collectors) throws IOException { + InternalProfileCollector collector = null; + for (QueryCollectorContext ctx : collectors) { + collector = ctx.createWithProfiler(collector); + } + return collector; + } + + /** + * Filters documents with a query score greater than minScore + * @param minScore The minimum score filter + */ + static QueryCollectorContext createMinScoreCollectorContext(float minScore) { + return new QueryCollectorContext(REASON_SEARCH_MIN_SCORE) { + @Override + Collector create(Collector in) { + return new MinimumScoreCollector(in, minScore); + } + }; + } + + /** + * Filters documents based on the provided query + */ + static QueryCollectorContext createFilteredCollectorContext(IndexSearcher searcher, Query query) { + return new QueryCollectorContext(REASON_SEARCH_POST_FILTER) { + @Override + Collector create(Collector in) throws IOException { + final Weight filterWeight = searcher.createNormalizedWeight(query, false); + return new FilteredCollector(in, filterWeight); + } + }; + } + + /** + * Creates a multi collector from the provided subs + */ + static QueryCollectorContext createMultiCollectorContext(Collection subs) { + return new QueryCollectorContext(REASON_SEARCH_MULTI) { + @Override + Collector create(Collector in) throws IOException { + List subCollectors = new ArrayList<>(); + subCollectors.add(in); + subCollectors.addAll(subs); + return MultiCollector.wrap(subCollectors); + } + + @Override + protected InternalProfileCollector createWithProfiler(InternalProfileCollector in) throws IOException { + final List subCollectors = new ArrayList<>(); + subCollectors.add(in); + if (subs.stream().anyMatch((col) -> col instanceof InternalProfileCollector == false)) { + throw new IllegalArgumentException("non-profiling collector"); + } + for (Collector collector : subs) { + subCollectors.add((InternalProfileCollector) collector); + } + final Collector collector = MultiCollector.wrap(subCollectors); + return new InternalProfileCollector(collector, REASON_SEARCH_MULTI, subCollectors); + } + }; + } + + /** + * Creates a time limiting collector limiting the collection to timeoutMillis milliseconds. 
+ */ + static QueryCollectorContext createTimeoutCollectorContext(Counter timeEstimate, long timeoutMillis) { + return new QueryCollectorContext(REASON_SEARCH_TIMEOUT) { + @Override + Collector create(Collector in) throws IOException { + return new TimeLimitingCollector(in, timeEstimate, timeoutMillis); + } + + @Override + boolean shouldCollect() { + return false; + } + }; + } + + /** + * Creates a collector that throws {@link TaskCancelledException} if the search is cancelled + */ + static QueryCollectorContext createCancellableCollectorContext(Provider cancelled, boolean lowLevelCancellation) { + return new QueryCollectorContext(REASON_SEARCH_CANCELLED) { + @Override + Collector create(Collector in) throws IOException { + return new CancellableCollector(cancelled, lowLevelCancellation, in); + } + + @Override + boolean shouldCollect() { + return false; + } + }; + } + + /** + * Creates collector limiting the collection to the first numHits documents + */ + static QueryCollectorContext createEarlyTerminationCollectorContext(int numHits) { + return new QueryCollectorContext(REASON_SEARCH_TERMINATE_AFTER_COUNT) { + private EarlyTerminatingCollector collector; + + @Override + Collector create(Collector in) throws IOException { + assert collector == null; + this.collector = new EarlyTerminatingCollector(in, numHits); + return collector; + } + + @Override + void postProcess(QuerySearchResult result, boolean hasCollected) throws IOException { + if (hasCollected && collector.terminatedEarly()) { + result.terminatedEarly(true); + } + } + }; + } + + /** + * Creates a sorting termination collector limiting the collection to the first numHits per segment. + * The total hit count matching the query is also computed if trackTotalHits is true. + */ + static QueryCollectorContext createEarlySortingTerminationCollectorContext(IndexReader reader, + Query query, + Sort indexSort, + int numHits, + boolean trackTotalHits, + boolean shouldCollect) { + return new QueryCollectorContext(REASON_SEARCH_TERMINATE_AFTER_COUNT) { + private BooleanSupplier terminatedEarlySupplier; + private IntSupplier countSupplier = null; + + @Override + Collector create(Collector in) throws IOException { + EarlyTerminatingSortingCollector sortingCollector = new EarlyTerminatingSortingCollector(in, indexSort, numHits); + terminatedEarlySupplier = sortingCollector::terminatedEarly; + Collector collector = sortingCollector; + if (trackTotalHits) { + int count = shouldCollect ? 
-1 : shortcutTotalHitCount(reader, query); + if (count == -1) { + TotalHitCountCollector countCollector = new TotalHitCountCollector(); + collector = MultiCollector.wrap(sortingCollector, countCollector); + this.countSupplier = countCollector::getTotalHits; + } else { + this.countSupplier = () -> count; + } + } + return collector; + } + + @Override + void postProcess(QuerySearchResult result, boolean hasCollected) throws IOException { + if (terminatedEarlySupplier.getAsBoolean()) { + result.terminatedEarly(true); + } + if (countSupplier != null) { + final TopDocs topDocs = result.topDocs(); + topDocs.totalHits = countSupplier.getAsInt(); + result.topDocs(topDocs, result.sortValueFormats()); + } + } + }; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 272c57fe98024..10c180f687ee6 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -19,35 +19,23 @@ package org.elasticsearch.search.query; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.Term; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.queries.MinDocQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.EarlyTerminatingSortingCollector; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; -import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TimeLimitingCollector; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopDocsCollector; -import org.apache.lucene.search.TopFieldCollector; -import org.apache.lucene.search.TopScoreDocCollector; -import org.apache.lucene.search.TotalHitCountCollector; -import org.apache.lucene.search.Weight; -import org.apache.lucene.search.grouping.CollapsingTopDocsCollector; -import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.lucene.MinimumScoreCollector; -import org.elasticsearch.common.lucene.search.FilteredCollector; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.search.collapse.CollapseContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchPhase; import org.elasticsearch.search.SearchService; @@ -56,18 +44,22 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.profile.SearchProfileShardResults; -import org.elasticsearch.search.profile.query.CollectorResult; import org.elasticsearch.search.profile.query.InternalProfileCollector; import org.elasticsearch.search.rescore.RescorePhase; -import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.suggest.SuggestPhase; -import java.util.AbstractList; -import java.util.ArrayList; -import java.util.Collections; +import java.util.LinkedList; import java.util.List; 
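(Illustrative sketch, not part of the patch: QueryPhase now assembles its collector chain from the QueryCollectorContext factories above. createQueryCollector folds the list in order, each context wrapping the collector built so far, so the first entry becomes the innermost collector and the last the outermost. Here topDocsContext stands in for the context produced by createTopDocsCollectorContext, which is defined outside this excerpt, and the min-score and terminate-after values are assumed.)

import java.util.LinkedList;
import java.util.List;
import org.apache.lucene.search.Collector;

List<QueryCollectorContext> collectors = new LinkedList<>();
collectors.add(topDocsContext);                                                      // innermost: gathers top docs
collectors.add(QueryCollectorContext.createMinScoreCollectorContext(0.5f));          // filters low-scoring docs
collectors.add(QueryCollectorContext.createEarlyTerminationCollectorContext(10_000)); // outermost: stops after 10k docs
Collector collector = QueryCollectorContext.createQueryCollector(collectors);
searcher.search(query, collector);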
-import java.util.concurrent.Callable; + +import static org.elasticsearch.search.query.QueryCollectorContext.createCancellableCollectorContext; +import static org.elasticsearch.search.query.QueryCollectorContext.createEarlySortingTerminationCollectorContext; +import static org.elasticsearch.search.query.QueryCollectorContext.createEarlyTerminationCollectorContext; +import static org.elasticsearch.search.query.QueryCollectorContext.createFilteredCollectorContext; +import static org.elasticsearch.search.query.QueryCollectorContext.createMinScoreCollectorContext; +import static org.elasticsearch.search.query.QueryCollectorContext.createMultiCollectorContext; +import static org.elasticsearch.search.query.QueryCollectorContext.createTimeoutCollectorContext; +import static org.elasticsearch.search.query.TopDocsCollectorContext.createTopDocsCollectorContext; /** * Query phase of a search request, used to run the query and get back from each shard information about the matching documents @@ -104,8 +96,9 @@ public void execute(SearchContext searchContext) throws QueryPhaseExecutionExcep // request, preProcess is called on the DFS phase phase, this is why we pre-process them // here to make sure it happens during the QUERY phase aggregationPhase.preProcess(searchContext); - - boolean rescore = execute(searchContext, searchContext.searcher()); + Sort indexSort = searchContext.mapperService().getIndexSettings().getIndexSortConfig() + .buildIndexSort(searchContext.mapperService()::fullName, searchContext.fieldData()::getForField); + boolean rescore = execute(searchContext, searchContext.searcher(), indexSort); if (rescore) { // only if we do a regular search rescorePhase.execute(searchContext); @@ -120,298 +113,149 @@ public void execute(SearchContext searchContext) throws QueryPhaseExecutionExcep } } - private static boolean returnsDocsInOrder(Query query, SortAndFormats sf) { - if (sf == null || Sort.RELEVANCE.equals(sf.sort)) { - // sort by score - // queries that return constant scores will return docs in index - // order since Lucene tie-breaks on the doc id - return query.getClass() == ConstantScoreQuery.class - || query.getClass() == MatchAllDocsQuery.class; - } else { - return Sort.INDEXORDER.equals(sf.sort); - } - } - /** * In a package-private method so that it can be tested without having to * wire everything (mapperService, etc.) 
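 * (For instance, a unit test could call {@code QueryPhase.execute(searchContext, searcher, null)}
 * directly with a mocked SearchContext and an in-memory IndexSearcher when the index
 * defines no index-time sort; this example is illustrative, not part of the patch.)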
* @return whether the rescoring phase should be executed */ - static boolean execute(SearchContext searchContext, final IndexSearcher searcher) throws QueryPhaseExecutionException { + static boolean execute(SearchContext searchContext, final IndexSearcher searcher, @Nullable Sort indexSort) throws QueryPhaseExecutionException { QuerySearchResult queryResult = searchContext.queryResult(); queryResult.searchTimedOut(false); - final boolean doProfile = searchContext.getProfilers() != null; - boolean rescore = false; try { queryResult.from(searchContext.from()); queryResult.size(searchContext.size()); - Query query = searchContext.query(); - - final int totalNumDocs = searcher.getIndexReader().numDocs(); - int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); - - Collector collector; - Callable topDocsCallable; - DocValueFormat[] sortValueFormats = new DocValueFormat[0]; - assert query == searcher.rewrite(query); // already rewritten - if (searchContext.size() == 0) { // no matter what the value of from is - final TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); - collector = totalHitCountCollector; - if (searchContext.getProfilers() != null) { - collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_COUNT, Collections.emptyList()); - } - topDocsCallable = () -> new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0); - } else { - // Perhaps have a dedicated scroll phase? - final ScrollContext scrollContext = searchContext.scrollContext(); - assert (scrollContext != null) == (searchContext.request().scroll() != null); - final Collector topDocsCollector; - ScoreDoc after = null; - if (searchContext.request().scroll() != null) { - numDocs = Math.min(searchContext.size(), totalNumDocs); - after = scrollContext.lastEmittedDoc; - - if (returnsDocsInOrder(query, searchContext.sort())) { - if (scrollContext.totalHits == -1) { - // first round - assert scrollContext.lastEmittedDoc == null; - // there is not much that we can optimize here since we want to collect all - // documents in order to get the total number of hits - } else { - // now this gets interesting: since we sort in index-order, we can directly - // skip to the desired doc and stop collecting after ${size} matches - if (scrollContext.lastEmittedDoc != null) { - BooleanQuery bq = new BooleanQuery.Builder() - .add(query, BooleanClause.Occur.MUST) - .add(new MinDocQuery(after.doc + 1), BooleanClause.Occur.FILTER) - .build(); - query = bq; - } - searchContext.terminateAfter(numDocs); - } - } - } else { - after = searchContext.searchAfter(); - } - if (totalNumDocs == 0) { - // top collectors don't like a size of 0 - numDocs = 1; - } - assert numDocs > 0; - if (searchContext.collapse() == null) { - if (searchContext.sort() != null) { - SortAndFormats sf = searchContext.sort(); - topDocsCollector = TopFieldCollector.create(sf.sort, numDocs, - (FieldDoc) after, true, searchContext.trackScores(), searchContext.trackScores()); - sortValueFormats = sf.formats; + final ScrollContext scrollContext = searchContext.scrollContext(); + if (scrollContext != null) { + if (returnsDocsInOrder(query, searchContext.sort())) { + if (scrollContext.totalHits == -1) { + // first round + assert scrollContext.lastEmittedDoc == null; + // there is not much that we can optimize here since we want to collect all + // documents in order to get the total number of hits } else { - rescore = !searchContext.rescore().isEmpty(); - for (RescoreSearchContext 
rescoreContext : searchContext.rescore()) {
-                    numDocs = Math.max(rescoreContext.window(), numDocs);
+                        // now this gets interesting: since we sort in index-order, we can directly
+                        // skip to the desired doc
+                        final ScoreDoc after = scrollContext.lastEmittedDoc;
+                        if (after != null) {
+                            BooleanQuery bq = new BooleanQuery.Builder()
+                                .add(query, BooleanClause.Occur.MUST)
+                                .add(new MinDocQuery(after.doc + 1), BooleanClause.Occur.FILTER)
+                                .build();
+                            query = bq;
                         }
-                    topDocsCollector = TopScoreDocCollector.create(numDocs, after);
-                }
-            } else {
-                Sort sort = Sort.RELEVANCE;
-                if (searchContext.sort() != null) {
-                    sort = searchContext.sort().sort;
+                        // ... and stop collecting after ${size} matches
+                        searchContext.terminateAfter(searchContext.size());
+                        searchContext.trackTotalHits(false);
                 }
-                CollapseContext collapse = searchContext.collapse();
-                topDocsCollector = collapse.createTopDocs(sort, numDocs, searchContext.trackScores());
-                if (searchContext.sort() == null) {
-                    sortValueFormats = new DocValueFormat[] {DocValueFormat.RAW};
-                } else {
-                    sortValueFormats = searchContext.sort().formats;
-                }
-            }
-            collector = topDocsCollector;
-            if (doProfile) {
-                collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TOP_HITS, Collections.emptyList());
-            }
-            topDocsCallable = () -> {
-                final TopDocs topDocs;
-                if (topDocsCollector instanceof TopDocsCollector) {
-                    topDocs = ((TopDocsCollector) topDocsCollector).topDocs();
-                } else if (topDocsCollector instanceof CollapsingTopDocsCollector) {
-                    topDocs = ((CollapsingTopDocsCollector) topDocsCollector).getTopDocs();
-                } else {
-                    throw new IllegalStateException("Unknown top docs collector " + topDocsCollector.getClass().getName());
-                }
-                if (scrollContext != null) {
-                    if (scrollContext.totalHits == -1) {
-                        // first round
-                        scrollContext.totalHits = topDocs.totalHits;
-                        scrollContext.maxScore = topDocs.getMaxScore();
-                    } else {
-                        // subsequent round: the total number of hits and
-                        // the maximum score were computed on the first round
-                        topDocs.totalHits = scrollContext.totalHits;
-                        topDocs.setMaxScore(scrollContext.maxScore);
-                    }
-                    if (searchContext.request().numberOfShards() == 1) {
-                        // if we fetch the document in the same roundtrip, we already know the last emitted doc
-                        if (topDocs.scoreDocs.length > 0) {
-                            // set the last emitted doc
-                            scrollContext.lastEmittedDoc = topDocs.scoreDocs[topDocs.scoreDocs.length - 1];
-                        }
-                    }
-                }
-                return topDocs;
-            };
-        }
-
-        final boolean terminateAfterSet = searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER;
-        if (terminateAfterSet) {
-            final Collector child = collector;
-            // throws Lucene.EarlyTerminationException when given count is reached
-            collector = Lucene.wrapCountBasedEarlyTerminatingCollector(collector, searchContext.terminateAfter());
-            if (doProfile) {
-                collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TERMINATE_AFTER_COUNT,
-                    Collections.singletonList((InternalProfileCollector) child));
-            }
-        }
+            final LinkedList<QueryCollectorContext> collectors = new LinkedList<>();
             if (searchContext.parsedPostFilter() != null) {
-                final Collector child = collector;
-                // this will only get applied to the actual search collector and not
-                // to any scoped collectors, also, it will only be applied to the main collector
-                // since that is where the filter should only work
-                final Weight filterWeight = searcher.createNormalizedWeight(searchContext.parsedPostFilter().query(), false);
-                collector = new FilteredCollector(collector, filterWeight);
-                if (doProfile) {
-                    collector
= new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_POST_FILTER, - Collections.singletonList((InternalProfileCollector) child)); - } + // add post filters before aggregations + // it will only be applied to top hits + collectors.add(createFilteredCollectorContext(searcher, searchContext.parsedPostFilter().query())); } - - // plug in additional collectors, like aggregations - final List subCollectors = new ArrayList<>(); - subCollectors.add(collector); - subCollectors.addAll(searchContext.queryCollectors().values()); - collector = MultiCollector.wrap(subCollectors); - if (doProfile && collector instanceof InternalProfileCollector == false) { - // When there is a single collector to wrap, MultiCollector returns it - // directly, so only wrap in the case that there are several sub collectors - final List children = new AbstractList() { - @Override - public InternalProfileCollector get(int index) { - return (InternalProfileCollector) subCollectors.get(index); - } - @Override - public int size() { - return subCollectors.size(); - } - }; - collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MULTI, children); + if (searchContext.queryCollectors().isEmpty() == false) { + // plug in additional collectors, like aggregations + collectors.add(createMultiCollectorContext(searchContext.queryCollectors().values())); } - - // apply the minimum score after multi collector so we filter aggs as well if (searchContext.minimumScore() != null) { - final Collector child = collector; - collector = new MinimumScoreCollector(collector, searchContext.minimumScore()); - if (doProfile) { - collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MIN_SCORE, - Collections.singletonList((InternalProfileCollector) child)); - } + // apply the minimum score after multi collector so we filter aggs as well + collectors.add(createMinScoreCollectorContext(searchContext.minimumScore())); } - - if (collector.getClass() == TotalHitCountCollector.class) { - // Optimize counts in simple cases to return in constant time - // instead of using a collector - while (true) { - // remove wrappers that don't matter for counts - // this is necessary so that we don't only optimize match_all - // queries but also match_all queries that are nested in - // a constant_score query - if (query instanceof ConstantScoreQuery) { - query = ((ConstantScoreQuery) query).getQuery(); - } else { - break; - } - } - - if (query.getClass() == MatchAllDocsQuery.class) { - collector = null; - topDocsCallable = new Callable() { - @Override - public TopDocs call() throws Exception { - int count = searcher.getIndexReader().numDocs(); - return new TopDocs(count, Lucene.EMPTY_SCORE_DOCS, 0); - } - }; - } else if (query.getClass() == TermQuery.class && searcher.getIndexReader().hasDeletions() == false) { - final Term term = ((TermQuery) query).getTerm(); - collector = null; - topDocsCallable = new Callable() { - @Override - public TopDocs call() throws Exception { - int count = 0; - for (LeafReaderContext context : searcher.getIndexReader().leaves()) { - count += context.reader().docFreq(term); - } - return new TopDocs(count, Lucene.EMPTY_SCORE_DOCS, 0); - } - }; - } + if (searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER) { + // apply terminate after after all filters collectors + collectors.add(createEarlyTerminationCollectorContext(searchContext.terminateAfter())); } - final boolean timeoutSet = searchContext.timeout() != null && 
!searchContext.timeout().equals(SearchService.NO_TIMEOUT); - if (timeoutSet && collector != null) { // collector might be null if no collection is actually needed - final Collector child = collector; + boolean timeoutSet = scrollContext == null && searchContext.timeout() != null && + searchContext.timeout().equals(SearchService.NO_TIMEOUT) == false; + if (timeoutSet) { // TODO: change to use our own counter that uses the scheduler in ThreadPool // throws TimeLimitingCollector.TimeExceededException when timeout has reached - collector = Lucene.wrapTimeLimitingCollector(collector, searchContext.timeEstimateCounter(), searchContext.timeout().millis()); - if (doProfile) { - collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TIMEOUT, - Collections.singletonList((InternalProfileCollector) child)); - } + collectors.add(createTimeoutCollectorContext(searchContext.timeEstimateCounter(), searchContext.timeout().millis())); } - - if (collector != null) { - final Collector child = collector; - collector = new CancellableCollector(searchContext.getTask()::isCancelled, searchContext.lowLevelCancellation(), collector); - if (doProfile) { - collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_CANCELLED, - Collections.singletonList((InternalProfileCollector) child)); - } + // add cancellable + collectors.add(createCancellableCollectorContext(searchContext.getTask()::isCancelled, searchContext.lowLevelCancellation())); + + final IndexReader reader = searcher.getIndexReader(); + final boolean doProfile = searchContext.getProfilers() != null; + // create the top docs collector last when the other collectors are known + final TopDocsCollectorContext topDocsFactory = createTopDocsCollectorContext(searchContext, reader, + collectors.stream().anyMatch(QueryCollectorContext::shouldCollect)); + final boolean shouldCollect = topDocsFactory.shouldCollect(); + if (scrollContext == null && topDocsFactory.numHits() > 0 && canEarlyTerminate(indexSort, searchContext)) { + // top docs collection can be early terminated based on index sort + // add the collector context first so we don't early terminate aggs but only top docs + collectors.addFirst(createEarlySortingTerminationCollectorContext(reader, searchContext.query(), indexSort, + topDocsFactory.numHits(), searchContext.trackTotalHits(), shouldCollect)); + } + // add the top docs collector, the first collector context in the chain + collectors.addFirst(topDocsFactory); + + final Collector queryCollector; + if (doProfile) { + InternalProfileCollector profileCollector = QueryCollectorContext.createQueryCollectorWithProfiler(collectors); + searchContext.getProfilers().getCurrentQueryProfiler().setCollector(profileCollector); + queryCollector = profileCollector; + } else { + queryCollector = QueryCollectorContext.createQueryCollector(collectors); } try { - if (collector != null) { - if (doProfile) { - searchContext.getProfilers().getCurrentQueryProfiler().setCollector((InternalProfileCollector) collector); - } - searcher.search(query, collector); + if (shouldCollect) { + searcher.search(query, queryCollector); } } catch (TimeLimitingCollector.TimeExceededException e) { assert timeoutSet : "TimeExceededException thrown even though timeout wasn't set"; queryResult.searchTimedOut(true); - } catch (Lucene.EarlyTerminationException e) { - assert terminateAfterSet : "EarlyTerminationException thrown even though terminateAfter wasn't set"; - queryResult.terminatedEarly(true); } finally { 
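            // always release resources scoped to the collection lifetime, whether the
            // search completed normally or timed out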
searchContext.clearReleasables(SearchContext.Lifetime.COLLECTION); } - if (terminateAfterSet && queryResult.terminatedEarly() == null) { - queryResult.terminatedEarly(false); - } - - queryResult.topDocs(topDocsCallable.call(), sortValueFormats); + final QuerySearchResult result = searchContext.queryResult(); + for (QueryCollectorContext ctx : collectors) { + ctx.postProcess(result, shouldCollect); + } if (searchContext.getProfilers() != null) { - ProfileShardResult shardResults = SearchProfileShardResults - .buildShardResults(searchContext.getProfilers()); - searchContext.queryResult().profileResults(shardResults); + ProfileShardResult shardResults = SearchProfileShardResults.buildShardResults(searchContext.getProfilers()); + result.profileResults(shardResults); } - - return rescore; - + return topDocsFactory.shouldRescore(); } catch (Exception e) { throw new QueryPhaseExecutionException(searchContext, "Failed to execute main query", e); } } + + /** + * Returns true if the provided query returns docs in index order (internal doc ids). + * @param query The query to execute + * @param sf The query sort + */ + static boolean returnsDocsInOrder(Query query, SortAndFormats sf) { + if (sf == null || Sort.RELEVANCE.equals(sf.sort)) { + // sort by score + // queries that return constant scores will return docs in index + // order since Lucene tie-breaks on the doc id + return query.getClass() == ConstantScoreQuery.class + || query.getClass() == MatchAllDocsQuery.class; + } else { + return Sort.INDEXORDER.equals(sf.sort); + } + } + + /** + * Returns true if the provided searchContext can early terminate based on indexSort + * @param indexSort The index sort specification + * @param context The search context for the request + */ + static boolean canEarlyTerminate(Sort indexSort, SearchContext context) { + final Sort sort = context.sort() == null ? Sort.RELEVANCE : context.sort().sort; + return indexSort != null && EarlyTerminatingSortingCollector.canEarlyTerminate(sort, indexSort); + } } diff --git a/core/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java b/core/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java new file mode 100644 index 0000000000000..93c2aa17de6d9 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java @@ -0,0 +1,306 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.search.query;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BoostQuery;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopDocsCollector;
+import org.apache.lucene.search.TopFieldCollector;
+import org.apache.lucene.search.TopScoreDocCollector;
+import org.apache.lucene.search.TotalHitCountCollector;
+import org.apache.lucene.search.grouping.CollapsingTopDocsCollector;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.collapse.CollapseContext;
+import org.elasticsearch.search.internal.ScrollContext;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.rescore.RescoreSearchContext;
+import org.elasticsearch.search.sort.SortAndFormats;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_COUNT;
+import static org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_TOP_HITS;
+
+/**
+ * A {@link QueryCollectorContext} that creates the top docs collector
+ */
+abstract class TopDocsCollectorContext extends QueryCollectorContext {
+    protected final int numHits;
+
+    TopDocsCollectorContext(String profilerName, int numHits) {
+        super(profilerName);
+        this.numHits = numHits;
+    }
+
+    /**
+     * Returns the number of top docs to retrieve
+     */
+    final int numHits() {
+        return numHits;
+    }
+
+    /**
+     * Returns true if the top docs should be re-scored after the initial search
+     */
+    boolean shouldRescore() {
+        return false;
+    }
+
+    static class TotalHitCountCollectorContext extends TopDocsCollectorContext {
+        private final TotalHitCountCollector collector;
+        private final int hitCount;
+
+        /**
+         * Constructor
+         * @param reader The index reader
+         * @param query The query to execute
+         * @param shouldCollect True if any previous collector context in the chain forces the search to be executed, false otherwise
+         */
+        private TotalHitCountCollectorContext(IndexReader reader, Query query, boolean shouldCollect) throws IOException {
+            super(REASON_SEARCH_COUNT, 0);
+            this.collector = new TotalHitCountCollector();
+            // implicit total hit counts are valid only when there is no filter collector in the chain
+            // so we check the shortcut only if shouldCollect is true
+            this.hitCount = shouldCollect ? -1 : shortcutTotalHitCount(reader, query);
+        }
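+
+        // Illustrative gloss (assuming a match_all count request with no filter collector in
+        // the chain): shortcutTotalHitCount(reader, query) resolves the hit count up front
+        // from reader.numDocs(), shouldCollect() below then returns false, and the query
+        // phase can skip collection entirely.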
+
+        @Override
+        boolean shouldCollect() {
+            return hitCount == -1;
+        }
+
+        Collector create(Collector in) {
+            assert in == null;
+            return collector;
+        }
+
+        @Override
+        void postProcess(QuerySearchResult result, boolean hasCollected) {
+            final int totalHitCount;
+            if (hasCollected) {
+                totalHitCount = collector.getTotalHits();
+            } else {
+                assert hitCount != -1;
+                totalHitCount = hitCount;
+            }
+            result.topDocs(new TopDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS, 0), null);
+        }
+    }
+
+    static class CollapsingTopDocsCollectorContext extends TopDocsCollectorContext {
+        private final DocValueFormat[] sortFmt;
+        private final CollapsingTopDocsCollector<?> topDocsCollector;
+
+        /**
+         * Constructor
+         * @param collapseContext The collapsing context
+         * @param sortAndFormats The query sort
+         * @param numHits The number of collapsed top hits to retrieve
+         * @param trackMaxScore True if max score should be tracked
+         */
+        private CollapsingTopDocsCollectorContext(CollapseContext collapseContext,
+                                                  @Nullable SortAndFormats sortAndFormats,
+                                                  int numHits,
+                                                  boolean trackMaxScore) {
+            super(REASON_SEARCH_TOP_HITS, numHits);
+            assert numHits > 0;
+            assert collapseContext != null;
+            Sort sort = sortAndFormats == null ? Sort.RELEVANCE : sortAndFormats.sort;
+            this.sortFmt = sortAndFormats == null ? new DocValueFormat[] { DocValueFormat.RAW } : sortAndFormats.formats;
+            this.topDocsCollector = collapseContext.createTopDocs(sort, numHits, trackMaxScore);
+        }
+
+        @Override
+        Collector create(Collector in) throws IOException {
+            assert in == null;
+            return topDocsCollector;
+        }
+
+        @Override
+        void postProcess(QuerySearchResult result, boolean hasCollected) throws IOException {
+            assert hasCollected;
+            result.topDocs(topDocsCollector.getTopDocs(), sortFmt);
+        }
+    }
+
+    abstract static class SimpleTopDocsCollectorContext extends TopDocsCollectorContext {
+        private final @Nullable SortAndFormats sortAndFormats;
+        private final TopDocsCollector<?> topDocsCollector;
+
+        /**
+         * Constructor
+         * @param sortAndFormats The query sort
+         * @param searchAfter The doc this request should "search after"
+         * @param numHits The number of top hits to retrieve
+         * @param trackMaxScore True if max score should be tracked
+         */
+        private SimpleTopDocsCollectorContext(@Nullable SortAndFormats sortAndFormats,
+                                              @Nullable ScoreDoc searchAfter,
+                                              int numHits,
+                                              boolean trackMaxScore) throws IOException {
+            super(REASON_SEARCH_TOP_HITS, numHits);
+            this.sortAndFormats = sortAndFormats;
+            if (sortAndFormats == null) {
+                this.topDocsCollector = TopScoreDocCollector.create(numHits, searchAfter);
+            } else {
+                this.topDocsCollector = TopFieldCollector.create(sortAndFormats.sort, numHits,
+                    (FieldDoc) searchAfter, true, trackMaxScore, trackMaxScore);
+            }
+        }
+
+        @Override
+        Collector create(Collector in) {
+            assert in == null;
+            return topDocsCollector;
+        }
+
+        @Override
+        void postProcess(QuerySearchResult result, boolean hasCollected) throws IOException {
+            assert hasCollected;
+            final TopDocs topDocs = topDocsCollector.topDocs();
+            result.topDocs(topDocs, sortAndFormats == null ?
null : sortAndFormats.formats); + } + } + + static class ScrollingTopDocsCollectorContext extends SimpleTopDocsCollectorContext { + private final ScrollContext scrollContext; + private final int numberOfShards; + + private ScrollingTopDocsCollectorContext(ScrollContext scrollContext, + @Nullable SortAndFormats sortAndFormats, + int numHits, + boolean trackMaxScore, + int numberOfShards) throws IOException { + super(sortAndFormats, scrollContext.lastEmittedDoc, numHits, trackMaxScore); + this.scrollContext = Objects.requireNonNull(scrollContext); + this.numberOfShards = numberOfShards; + } + + @Override + void postProcess(QuerySearchResult result, boolean hasCollected) throws IOException { + super.postProcess(result, hasCollected); + final TopDocs topDocs = result.topDocs(); + if (scrollContext.totalHits == -1) { + // first round + scrollContext.totalHits = topDocs.totalHits; + scrollContext.maxScore = topDocs.getMaxScore(); + } else { + // subsequent round: the total number of hits and + // the maximum score were computed on the first round + topDocs.totalHits = scrollContext.totalHits; + topDocs.setMaxScore(scrollContext.maxScore); + } + if (numberOfShards == 1) { + // if we fetch the document in the same roundtrip, we already know the last emitted doc + if (topDocs.scoreDocs.length > 0) { + // set the last emitted doc + scrollContext.lastEmittedDoc = topDocs.scoreDocs[topDocs.scoreDocs.length - 1]; + } + } + result.topDocs(topDocs, result.sortValueFormats()); + } + } + + /** + * Returns query total hit count if the query is a {@link MatchAllDocsQuery} + * or a {@link TermQuery} and the reader has no deletions, + * -1 otherwise. + */ + static int shortcutTotalHitCount(IndexReader reader, Query query) throws IOException { + while (true) { + // remove wrappers that don't matter for counts + // this is necessary so that we don't only optimize match_all + // queries but also match_all queries that are nested in + // a constant_score query + if (query instanceof ConstantScoreQuery) { + query = ((ConstantScoreQuery) query).getQuery(); + } else if (query instanceof BoostQuery) { + query = ((BoostQuery) query).getQuery(); + } else { + break; + } + } + if (query.getClass() == MatchAllDocsQuery.class) { + return reader.numDocs(); + } else if (query.getClass() == TermQuery.class && reader.hasDeletions() == false) { + final Term term = ((TermQuery) query).getTerm(); + int count = 0; + for (LeafReaderContext context : reader.leaves()) { + count += context.reader().docFreq(term); + } + return count; + } else { + return -1; + } + } + + /** + * Creates a {@link TopDocsCollectorContext} from the provided searchContext + */ + static TopDocsCollectorContext createTopDocsCollectorContext(SearchContext searchContext, + IndexReader reader, + boolean shouldCollect) throws IOException { + final Query query = searchContext.query(); + // top collectors don't like a size of 0 + final int totalNumDocs = Math.max(1, reader.numDocs()); + if (searchContext.size() == 0) { + // no matter what the value of from is + return new TotalHitCountCollectorContext(reader, query, shouldCollect); + } else if (searchContext.scrollContext() != null) { + // no matter what the value of from is + int numDocs = Math.min(searchContext.size(), totalNumDocs); + return new ScrollingTopDocsCollectorContext(searchContext.scrollContext(), + searchContext.sort(), numDocs, searchContext.trackScores(), searchContext.numberOfShards()); + } else if (searchContext.collapse() != null) { + int numDocs = Math.min(searchContext.from() + 
searchContext.size(), totalNumDocs); + return new CollapsingTopDocsCollectorContext(searchContext.collapse(), + searchContext.sort(), numDocs, searchContext.trackScores()); + } else { + int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); + final boolean rescore = searchContext.rescore().isEmpty() == false; + for (RescoreSearchContext rescoreContext : searchContext.rescore()) { + numDocs = Math.max(numDocs, rescoreContext.window()); + } + return new SimpleTopDocsCollectorContext(searchContext.sort(), + searchContext.searchAfter(), + numDocs, + searchContext.trackScores()) { + @Override + boolean shouldRescore() { + return rescore; + } + }; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/transport/TransportActionProxy.java b/core/src/main/java/org/elasticsearch/transport/TransportActionProxy.java index 502ab51e0fa1b..5259fca507e49 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportActionProxy.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportActionProxy.java @@ -158,4 +158,18 @@ public static TransportRequest unwrapRequest(TransportRequest request) { } return request; } + + /** + * Returns true iff the given action is a proxy action + */ + public static boolean isProxyAction(String action) { + return action.startsWith(PROXY_ACTION_PREFIX); + } + + /** + * Returns true iff the given request is a proxy request + */ + public static boolean isProxyRequest(TransportRequest request) { + return request instanceof ProxyRequest; + } } diff --git a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java index b047716956728..8f96936e43b55 100644 --- a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java +++ b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java @@ -54,58 +54,6 @@ import static org.hamcrest.Matchers.equalTo; public class BlendedTermQueryTests extends ESTestCase { - public void testBooleanQuery() throws IOException { - Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))); - String[] firstNames = new String[]{ - "simon", "paul" - }; - String[] surNames = new String[]{ - "willnauer", "simon" - }; - for (int i = 0; i < surNames.length; i++) { - Document d = new Document(); - d.add(new TextField("id", Integer.toString(i), Field.Store.YES)); - d.add(new TextField("firstname", firstNames[i], Field.Store.NO)); - d.add(new TextField("surname", surNames[i], Field.Store.NO)); - w.addDocument(d); - } - int iters = scaledRandomIntBetween(25, 100); - for (int j = 0; j < iters; j++) { - Document d = new Document(); - d.add(new TextField("id", Integer.toString(firstNames.length + j), Field.Store.YES)); - d.add(new TextField("firstname", rarely() ? 
"some_other_name" : - "simon the sorcerer", Field.Store.NO)); // make sure length-norm is the tie-breaker - d.add(new TextField("surname", "bogus", Field.Store.NO)); - w.addDocument(d); - } - w.commit(); - DirectoryReader reader = DirectoryReader.open(w); - IndexSearcher searcher = setSimilarity(newSearcher(reader)); - - { - Term[] terms = new Term[]{new Term("firstname", "simon"), new Term("surname", "simon")}; - BlendedTermQuery query = BlendedTermQuery.booleanBlendedQuery(terms); - TopDocs search = searcher.search(query, 3); - ScoreDoc[] scoreDocs = search.scoreDocs; - assertEquals(3, scoreDocs.length); - assertEquals(Integer.toString(0), reader.document(scoreDocs[0].doc).getField("id").stringValue()); - } - { - BooleanQuery.Builder query = new BooleanQuery.Builder(); - query.add(new TermQuery(new Term("firstname", "simon")), BooleanClause.Occur.SHOULD); - query.add(new TermQuery(new Term("surname", "simon")), BooleanClause.Occur.SHOULD); - TopDocs search = searcher.search(query.build(), 1); - ScoreDoc[] scoreDocs = search.scoreDocs; - assertEquals(Integer.toString(1), reader.document(scoreDocs[0].doc).getField("id").stringValue()); - - } - reader.close(); - w.close(); - dir.close(); - - } - public void testDismaxQuery() throws IOException { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))); @@ -183,12 +131,11 @@ public void testBasics() { } String term = TestUtil.randomRealisticUnicodeString(random(), 1, 10); Term[] terms = toTerms(fields, term); - boolean useBoolean = random().nextBoolean(); float tieBreaker = random().nextFloat(); - BlendedTermQuery query = useBoolean ? BlendedTermQuery.booleanBlendedQuery(terms) : BlendedTermQuery.dismaxBlendedQuery(terms, tieBreaker); + BlendedTermQuery query = BlendedTermQuery.dismaxBlendedQuery(terms, tieBreaker); QueryUtils.check(query); terms = toTerms(fields, term); - BlendedTermQuery query2 = useBoolean ? BlendedTermQuery.booleanBlendedQuery(terms) : BlendedTermQuery.dismaxBlendedQuery(terms, tieBreaker); + BlendedTermQuery query2 = BlendedTermQuery.dismaxBlendedQuery(terms, tieBreaker); assertEquals(query, query2); } } @@ -217,8 +164,7 @@ public void testExtractTerms() throws IOException { terms.add(new Term(TestUtil.randomRealisticUnicodeString(random(), 1, 10), TestUtil.randomRealisticUnicodeString(random(), 1, 10))); } - BlendedTermQuery blendedTermQuery = random().nextBoolean() ? BlendedTermQuery.dismaxBlendedQuery(terms.toArray(new Term[0]), random().nextFloat()) : - BlendedTermQuery.booleanBlendedQuery(terms.toArray(new Term[0])); + BlendedTermQuery blendedTermQuery = BlendedTermQuery.dismaxBlendedQuery(terms.toArray(new Term[0]), random().nextFloat()); Set extracted = new HashSet<>(); IndexSearcher searcher = new IndexSearcher(new MultiReader()); searcher.createNormalizedWeight(blendedTermQuery, false).extractTerms(extracted); diff --git a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java deleted file mode 100644 index 2d43a1ca64ef4..0000000000000 --- a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.lucene.search.postingshighlight; - -import org.apache.lucene.search.highlight.Snippet; -import org.apache.lucene.search.highlight.DefaultEncoder; -import org.apache.lucene.search.highlight.SimpleHTMLEncoder; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.test.ESTestCase; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.notNullValue; - - -public class CustomPassageFormatterTests extends ESTestCase { - public void testSimpleFormat() { - String content = "This is a really cool highlighter. Postings highlighter gives nice snippets back. No matches here."; - - CustomPassageFormatter passageFormatter = new CustomPassageFormatter("", "", new DefaultEncoder()); - - Passage[] passages = new Passage[3]; - String match = "highlighter"; - BytesRef matchBytesRef = new BytesRef(match); - - Passage passage1 = new Passage(); - int start = content.indexOf(match); - int end = start + match.length(); - passage1.startOffset = 0; - passage1.endOffset = end + 2; //lets include the whitespace at the end to make sure we trim it - passage1.addMatch(start, end, matchBytesRef); - passages[0] = passage1; - - Passage passage2 = new Passage(); - start = content.lastIndexOf(match); - end = start + match.length(); - passage2.startOffset = passage1.endOffset; - passage2.endOffset = end + 26; - passage2.addMatch(start, end, matchBytesRef); - passages[1] = passage2; - - Passage passage3 = new Passage(); - passage3.startOffset = passage2.endOffset; - passage3.endOffset = content.length(); - passages[2] = passage3; - - Snippet[] fragments = passageFormatter.format(passages, content); - assertThat(fragments, notNullValue()); - assertThat(fragments.length, equalTo(3)); - assertThat(fragments[0].getText(), equalTo("This is a really cool highlighter.")); - assertThat(fragments[0].isHighlighted(), equalTo(true)); - assertThat(fragments[1].getText(), equalTo("Postings highlighter gives nice snippets back.")); - assertThat(fragments[1].isHighlighted(), equalTo(true)); - assertThat(fragments[2].getText(), equalTo("No matches here.")); - assertThat(fragments[2].isHighlighted(), equalTo(false)); - } - - public void testHtmlEncodeFormat() { - String content = "This is a really cool highlighter. 
Postings highlighter gives nice snippets back."; - - CustomPassageFormatter passageFormatter = new CustomPassageFormatter("", "", new SimpleHTMLEncoder()); - - Passage[] passages = new Passage[2]; - String match = "highlighter"; - BytesRef matchBytesRef = new BytesRef(match); - - Passage passage1 = new Passage(); - int start = content.indexOf(match); - int end = start + match.length(); - passage1.startOffset = 0; - passage1.endOffset = end + 6; //lets include the whitespace at the end to make sure we trim it - passage1.addMatch(start, end, matchBytesRef); - passages[0] = passage1; - - Passage passage2 = new Passage(); - start = content.lastIndexOf(match); - end = start + match.length(); - passage2.startOffset = passage1.endOffset; - passage2.endOffset = content.length(); - passage2.addMatch(start, end, matchBytesRef); - passages[1] = passage2; - - Snippet[] fragments = passageFormatter.format(passages, content); - assertThat(fragments, notNullValue()); - assertThat(fragments.length, equalTo(2)); - assertThat(fragments[0].getText(), equalTo("<b>This is a really cool highlighter.</b>")); - assertThat(fragments[1].getText(), equalTo("Postings highlighter gives nice snippets back.")); - } -} diff --git a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java deleted file mode 100644 index 315e38d12feb5..0000000000000 --- a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.lucene.search.postingshighlight; - -import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.FieldType; -import org.apache.lucene.document.TextField; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.highlight.Snippet; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.highlight.DefaultEncoder; -import org.apache.lucene.store.Directory; -import org.elasticsearch.search.fetch.subphase.highlight.HighlightUtils; -import org.elasticsearch.test.ESTestCase; - -import static org.hamcrest.CoreMatchers.equalTo; - -public class CustomPostingsHighlighterTests extends ESTestCase { - public void testCustomPostingsHighlighter() throws Exception { - Directory dir = newDirectory(); - IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random())); - iwc.setMergePolicy(newLogMergePolicy()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); - - FieldType offsetsType = new FieldType(TextField.TYPE_STORED); - offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); - - //good position but only one match - final String firstValue = "This is a test. Just a test1 highlighting from postings highlighter."; - Field body = new Field("body", "", offsetsType); - Document doc = new Document(); - doc.add(body); - body.setStringValue(firstValue); - - //two matches, not the best snippet due to its length though - final String secondValue = "This is the second highlighting value to perform highlighting on a longer text that gets scored lower."; - Field body2 = new Field("body", "", offsetsType); - doc.add(body2); - body2.setStringValue(secondValue); - - //two matches and short, will be scored highest - final String thirdValue = "This is highlighting the third short highlighting value."; - Field body3 = new Field("body", "", offsetsType); - doc.add(body3); - body3.setStringValue(thirdValue); - - //one match, same as first but at the end, will be scored lower due to its position - final String fourthValue = "Just a test4 highlighting from postings highlighter."; - Field body4 = new Field("body", "", offsetsType); - doc.add(body4); - body4.setStringValue(fourthValue); - - iw.addDocument(doc); - - IndexReader ir = iw.getReader(); - iw.close(); - - String firstHlValue = "Just a test1 highlighting from postings highlighter."; - String secondHlValue = "This is the second highlighting value to perform highlighting on a longer text that gets scored lower."; - String thirdHlValue = "This is highlighting the third short highlighting value."; - String fourthHlValue = "Just a test4 highlighting from postings highlighter."; - - IndexSearcher searcher = newSearcher(ir); - Query query = new TermQuery(new Term("body", "highlighting")); - - TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); - assertThat(topDocs.totalHits, equalTo(1)); - - int docId = topDocs.scoreDocs[0].doc; - - String fieldValue = firstValue + HighlightUtils.PARAGRAPH_SEPARATOR + secondValue + HighlightUtils.PARAGRAPH_SEPARATOR + thirdValue + 
HighlightUtils.PARAGRAPH_SEPARATOR + fourthValue; - - CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(null, new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValue, false); - Snippet[] snippets = highlighter.highlightField("body", query, searcher, docId, 5); - - assertThat(snippets.length, equalTo(4)); - - assertThat(snippets[0].getText(), equalTo(firstHlValue)); - assertThat(snippets[1].getText(), equalTo(secondHlValue)); - assertThat(snippets[2].getText(), equalTo(thirdHlValue)); - assertThat(snippets[3].getText(), equalTo(fourthHlValue)); - - ir.close(); - dir.close(); - } - - public void testNoMatchSize() throws Exception { - Directory dir = newDirectory(); - IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random())); - iwc.setMergePolicy(newLogMergePolicy()); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); - - FieldType offsetsType = new FieldType(TextField.TYPE_STORED); - offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); - Field body = new Field("body", "", offsetsType); - Field none = new Field("none", "", offsetsType); - Document doc = new Document(); - doc.add(body); - doc.add(none); - - String firstValue = "This is a test. Just a test highlighting from postings. Feel free to ignore."; - body.setStringValue(firstValue); - none.setStringValue(firstValue); - iw.addDocument(doc); - - IndexReader ir = iw.getReader(); - iw.close(); - - Query query = new TermQuery(new Term("none", "highlighting")); - - IndexSearcher searcher = newSearcher(ir); - TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); - assertThat(topDocs.totalHits, equalTo(1)); - int docId = topDocs.scoreDocs[0].doc; - - CustomPassageFormatter passageFormatter = new CustomPassageFormatter("", "", new DefaultEncoder()); - - CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(null, passageFormatter, firstValue, false); - Snippet[] snippets = highlighter.highlightField("body", query, searcher, docId, 5); - assertThat(snippets.length, equalTo(0)); - - highlighter = new CustomPostingsHighlighter(null, passageFormatter, firstValue, true); - snippets = highlighter.highlightField("body", query, searcher, docId, 5); - assertThat(snippets.length, equalTo(1)); - assertThat(snippets[0].getText(), equalTo("This is a test.")); - - ir.close(); - dir.close(); - } -} diff --git a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIteratorTests.java b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIteratorTests.java deleted file mode 100644 index 17aeb869c1a04..0000000000000 --- a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIteratorTests.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.lucene.search.postingshighlight; - -import org.elasticsearch.search.fetch.subphase.highlight.HighlightUtils; -import org.elasticsearch.test.ESTestCase; - -import java.text.BreakIterator; -import java.text.CharacterIterator; -import java.text.StringCharacterIterator; -import java.util.Locale; - -import static org.hamcrest.CoreMatchers.equalTo; - -public class CustomSeparatorBreakIteratorTests extends ESTestCase { - public void testBreakOnCustomSeparator() throws Exception { - Character separator = randomSeparator(); - BreakIterator bi = new CustomSeparatorBreakIterator(separator); - String source = "this" + separator + "is" + separator + "the" + separator + "first" + separator + "sentence"; - bi.setText(source); - assertThat(bi.current(), equalTo(0)); - assertThat(bi.first(), equalTo(0)); - assertThat(source.substring(bi.current(), bi.next()), equalTo("this" + separator)); - assertThat(source.substring(bi.current(), bi.next()), equalTo("is" + separator)); - assertThat(source.substring(bi.current(), bi.next()), equalTo("the" + separator)); - assertThat(source.substring(bi.current(), bi.next()), equalTo("first" + separator)); - assertThat(source.substring(bi.current(), bi.next()), equalTo("sentence")); - assertThat(bi.next(), equalTo(BreakIterator.DONE)); - - assertThat(bi.last(), equalTo(source.length())); - int current = bi.current(); - assertThat(source.substring(bi.previous(), current), equalTo("sentence")); - current = bi.current(); - assertThat(source.substring(bi.previous(), current), equalTo("first" + separator)); - current = bi.current(); - assertThat(source.substring(bi.previous(), current), equalTo("the" + separator)); - current = bi.current(); - assertThat(source.substring(bi.previous(), current), equalTo("is" + separator)); - current = bi.current(); - assertThat(source.substring(bi.previous(), current), equalTo("this" + separator)); - assertThat(bi.previous(), equalTo(BreakIterator.DONE)); - assertThat(bi.current(), equalTo(0)); - - assertThat(source.substring(0, bi.following(9)), equalTo("this" + separator + "is" + separator + "the" + separator)); - - assertThat(source.substring(0, bi.preceding(9)), equalTo("this" + separator + "is" + separator)); - - assertThat(bi.first(), equalTo(0)); - assertThat(source.substring(0, bi.next(3)), equalTo("this" + separator + "is" + separator + "the" + separator)); - } - - public void testSingleSentences() throws Exception { - BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT); - BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator()); - assertSameBreaks("a", expected, actual); - assertSameBreaks("ab", expected, actual); - assertSameBreaks("abc", expected, actual); - assertSameBreaks("", expected, actual); - } - - public void testSliceEnd() throws Exception { - BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT); - BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator()); - assertSameBreaks("a000", 0, 1, expected, actual); - assertSameBreaks("ab000", 0, 1, expected, actual); - assertSameBreaks("abc000", 0, 1, expected, actual); - assertSameBreaks("000", 0, 0, expected, actual); - } - - public void testSliceStart() throws Exception { - BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT); - BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator()); - assertSameBreaks("000a", 3, 1, expected, actual); - 
assertSameBreaks("000ab", 3, 2, expected, actual); - assertSameBreaks("000abc", 3, 3, expected, actual); - assertSameBreaks("000", 3, 0, expected, actual); - } - - public void testSliceMiddle() throws Exception { - BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT); - BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator()); - assertSameBreaks("000a000", 3, 1, expected, actual); - assertSameBreaks("000ab000", 3, 2, expected, actual); - assertSameBreaks("000abc000", 3, 3, expected, actual); - assertSameBreaks("000000", 3, 0, expected, actual); - } - - /** the current position must be ignored, initial position is always first() */ - public void testFirstPosition() throws Exception { - BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT); - BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator()); - assertSameBreaks("000ab000", 3, 2, 4, expected, actual); - } - - private static char randomSeparator() { - return randomFrom(' ', HighlightUtils.NULL_SEPARATOR, HighlightUtils.PARAGRAPH_SEPARATOR); - } - - private static void assertSameBreaks(String text, BreakIterator expected, BreakIterator actual) { - assertSameBreaks(new StringCharacterIterator(text), - new StringCharacterIterator(text), - expected, - actual); - } - - private static void assertSameBreaks(String text, int offset, int length, BreakIterator expected, BreakIterator actual) { - assertSameBreaks(text, offset, length, offset, expected, actual); - } - - private static void assertSameBreaks(String text, int offset, int length, int current, BreakIterator expected, BreakIterator actual) { - assertSameBreaks(new StringCharacterIterator(text, offset, offset + length, current), - new StringCharacterIterator(text, offset, offset + length, current), - expected, - actual); - } - - /** Asserts that two breakiterators break the text the same way */ - private static void assertSameBreaks(CharacterIterator one, CharacterIterator two, BreakIterator expected, BreakIterator actual) { - expected.setText(one); - actual.setText(two); - - assertEquals(expected.current(), actual.current()); - - // next() - int v = expected.current(); - while (v != BreakIterator.DONE) { - assertEquals(v = expected.next(), actual.next()); - assertEquals(expected.current(), actual.current()); - } - - // first() - assertEquals(expected.first(), actual.first()); - assertEquals(expected.current(), actual.current()); - // last() - assertEquals(expected.last(), actual.last()); - assertEquals(expected.current(), actual.current()); - - // previous() - v = expected.current(); - while (v != BreakIterator.DONE) { - assertEquals(v = expected.previous(), actual.previous()); - assertEquals(expected.current(), actual.current()); - } - - // following() - for (int i = one.getBeginIndex(); i <= one.getEndIndex(); i++) { - expected.first(); - actual.first(); - assertEquals(expected.following(i), actual.following(i)); - assertEquals(expected.current(), actual.current()); - } - - // preceding() - for (int i = one.getBeginIndex(); i <= one.getEndIndex(); i++) { - expected.last(); - actual.last(); - assertEquals(expected.preceding(i), actual.preceding(i)); - assertEquals(expected.current(), actual.current()); - } - } -} diff --git a/core/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java b/core/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java index 4e664c3e24101..0b8bccb784f24 100644 --- 
a/core/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java +++ b/core/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java @@ -19,7 +19,6 @@ package org.apache.lucene.search.uhighlight; -import org.apache.lucene.search.highlight.Snippet; import org.apache.lucene.search.highlight.DefaultEncoder; import org.apache.lucene.search.highlight.SimpleHTMLEncoder; import org.apache.lucene.util.BytesRef; diff --git a/core/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java b/core/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java index 35cde8e02e050..eec611146a62a 100644 --- a/core/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java +++ b/core/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java @@ -41,7 +41,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.highlight.DefaultEncoder; -import org.apache.lucene.search.highlight.Snippet; import org.apache.lucene.store.Directory; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.all.AllTermQuery; diff --git a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 4add6bce90071..84664eaeea376 100644 --- a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -720,7 +720,7 @@ public void testIds() { ids.put(57, org.elasticsearch.indices.IndexTemplateMissingException.class); ids.put(58, org.elasticsearch.transport.SendRequestTransportException.class); ids.put(59, org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class); - ids.put(60, org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class); + ids.put(60, null); // EarlyTerminationException was removed in 6.0 ids.put(61, null); // RoutingValidationException was removed in 5.0 ids.put(62, org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class); ids.put(63, org.elasticsearch.indices.AliasFilterParsingException.class); diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index 226c3fad03a1e..d8cd635f33f6b 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -254,7 +254,7 @@ public void testAllVersionsMatchId() throws Exception { final Set unreleasedVersions = new HashSet<>(VersionUtils.allUnreleasedVersions()); Map maxBranchVersions = new HashMap<>(); for (java.lang.reflect.Field field : Version.class.getFields()) { - if (field.getName().matches("_ID(_UNRELEASED)?")) { + if (field.getName().matches("_ID")) { assertTrue(field.getName() + " should be static", Modifier.isStatic(field.getModifiers())); assertTrue(field.getName() + " should be final", Modifier.isFinal(field.getModifiers())); int versionId = (Integer)field.get(Version.class); @@ -293,7 +293,12 @@ public void testAllVersionsMatchId() throws Exception { if (maxBranchVersion == null) { maxBranchVersions.put(branchName, v); } else if (v.after(maxBranchVersion)) { - assertFalse("Version " + maxBranchVersion + " cannot be a snapshot because version " + v + " exists", VersionUtils.isSnapshot(maxBranchVersion)); + if (v == Version.CURRENT) { + // Current is weird - it counts as released 
even though it shouldn't. + continue; + } + assertFalse("Version " + maxBranchVersion + " cannot be a snapshot because version " + v + " exists", + VersionUtils.allUnreleasedVersions().contains(maxBranchVersion)); maxBranchVersions.put(branchName, v); } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index f461be77e0c65..3c55d0df9c142 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -335,4 +335,12 @@ public void testInvalidPartitionSize() { assertTrue(createPartitionedIndex.apply(1, 1)); } + + public void testIndexNameInResponse() { + CreateIndexResponse response = prepareCreate("foo") + .setSettings(Settings.builder().build()) + .get(); + + assertEquals("Should have index name in response", "foo", response.index()); + } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java new file mode 100644 index 0000000000000..588659335e499 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.action.admin.indices.create;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+
+public class CreateIndexResponseTests extends ESTestCase {
+
+    public void testSerialization() throws IOException {
+        CreateIndexResponse response = new CreateIndexResponse(true, true, "foo");
+
+        try (BytesStreamOutput output = new BytesStreamOutput()) {
+            response.writeTo(output);
+
+            try (StreamInput in = output.bytes().streamInput()) {
+                CreateIndexResponse serialized = new CreateIndexResponse();
+                serialized.readFrom(in);
+                assertEquals(response.isShardsAcked(), serialized.isShardsAcked());
+                assertEquals(response.isAcknowledged(), serialized.isAcknowledged());
+                assertEquals(response.index(), serialized.index());
+            }
+        }
+    }
+
+    public void testSerializationWithOldVersion() throws IOException {
+        Version oldVersion = Version.V_5_4_0;
+        CreateIndexResponse response = new CreateIndexResponse(true, true, "foo");
+
+        try (BytesStreamOutput output = new BytesStreamOutput()) {
+            output.setVersion(oldVersion);
+            response.writeTo(output);
+
+            try (StreamInput in = output.bytes().streamInput()) {
+                in.setVersion(oldVersion);
+                CreateIndexResponse serialized = new CreateIndexResponse();
+                serialized.readFrom(in);
+                assertEquals(response.isShardsAcked(), serialized.isShardsAcked());
+                assertEquals(response.isAcknowledged(), serialized.isAcknowledged());
+                assertNull(serialized.index());
+            }
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
index 2bd13669fee26..a34e6bcc0c409 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.action.admin.indices.get;
 
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
 import org.elasticsearch.cluster.metadata.AliasMetaData;
@@ -281,6 +282,8 @@ private void assertEmptyMappings(GetIndexResponse response) {
 
     private void assertEmptyAliases(GetIndexResponse response) {
         assertThat(response.aliases(), notNullValue());
-        assertThat(response.aliases().isEmpty(), equalTo(true));
+        for (final ObjectObjectCursor<String, List<AliasMetaData>> entry : response.getAliases()) {
+            assertTrue(entry.value.isEmpty());
+        }
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
index 5ab9e1ea5351e..1d6b77fc747c7 100644
--- a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
+++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
@@ -118,7 +118,7 @@ boolean shouldAutoCreate(String index, ClusterState state) {
             @Override
             void createIndex(String index, TimeValue timeout, ActionListener<CreateIndexResponse> listener) {
                 // If we try to create an index just immediately assume it worked
-                listener.onResponse(new CreateIndexResponse(true, true) {});
+                listener.onResponse(new CreateIndexResponse(true, true, index) {});
             }
         };
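        // the stubbed createIndex(...) above answers immediately with the new three-argument
        // CreateIndexResponse(acknowledged, shardsAcked, index), so the bulk execution below
        // proceeds as if the missing index had just been created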
action.doExecute(null, bulkRequest, null); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 39a4bb2feca3f..aa7f613a176af 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -541,11 +541,13 @@ public void testNoOpReplicationOnPrimaryDocumentFailure() throws Exception { itemRequests[0] = itemRequest; BulkShardRequest bulkShardRequest = new BulkShardRequest( shard.shardId(), RefreshPolicy.NONE, itemRequests); + bulkShardRequest.primaryTerm(randomIntBetween(1, (int) shard.getPrimaryTerm())); TransportShardBulkAction.performOnReplica(bulkShardRequest, shard); ArgumentCaptor noOp = ArgumentCaptor.forClass(Engine.NoOp.class); verify(shard, times(1)).markSeqNoAsNoOp(noOp.capture()); final Engine.NoOp noOpValue = noOp.getValue(); assertThat(noOpValue.seqNo(), equalTo(1L)); + assertThat(noOpValue.primaryTerm(), equalTo(bulkShardRequest.primaryTerm())); assertThat(noOpValue.reason(), containsString(failureMessage)); closeShards(shard); } diff --git a/core/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java b/core/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java new file mode 100644 index 0000000000000..5037ffe03f962 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java @@ -0,0 +1,256 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.search; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.NodeNotConnectedException; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; + +public class ClearScrollControllerTests extends ESTestCase { + + public void testClearAll() throws IOException, InterruptedException { + DiscoveryNode node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); + CountDownLatch latch = new CountDownLatch(1); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(ClearScrollResponse clearScrollResponse) { + try { + assertEquals(3, clearScrollResponse.getNumFreed()); + assertTrue(clearScrollResponse.isSucceeded()); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + try { + throw new AssertionError(e); + } finally { + latch.countDown(); + } + } + }; + List nodesInvoked = new CopyOnWriteArrayList<>(); + SearchTransportService searchTransportService = new SearchTransportService(Settings.EMPTY, null) { + @Override + public void sendClearAllScrollContexts(Transport.Connection connection, ActionListener listener) { + nodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(TransportResponse.Empty.INSTANCE)); // response is unused + t.start(); + } + + @Override + Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.scrollIds(Arrays.asList("_all")); + ClearScrollController controller = new ClearScrollController(clearScrollRequest, listener, + nodes, logger, searchTransportService); + controller.run(); + latch.await(); + assertEquals(3, nodesInvoked.size()); + Collections.sort(nodesInvoked, Comparator.comparing(DiscoveryNode::getId)); + assertEquals(nodesInvoked, Arrays.asList(node1, node2, node3)); + } + + + public void testClearScrollIds() throws IOException, InterruptedException { + DiscoveryNode node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); + AtomicArray array = new AtomicArray<>(3); + 
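// Each TestSearchPhaseResult below stands in for one per-shard search context: a context id plus
// the node that owns it. TransportSearchHelper.buildScrollId packs all of them into the single
// scroll id string that ClearScrollController later parses back, which is how the controller
// knows which node each sendFreeContext call must be routed to.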
SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult1 = new SearchAsyncActionTests.TestSearchPhaseResult(1, node1); + testSearchPhaseResult1.setSearchShardTarget(new SearchShardTarget("node_1", new ShardId("idx", "uuid1", 2), null, null)); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult2 = new SearchAsyncActionTests.TestSearchPhaseResult(12, node2); + testSearchPhaseResult2.setSearchShardTarget(new SearchShardTarget("node_2", new ShardId("idy", "uuid2", 42), null, null)); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult3 = new SearchAsyncActionTests.TestSearchPhaseResult(42, node3); + testSearchPhaseResult3.setSearchShardTarget(new SearchShardTarget("node_3", new ShardId("idy", "uuid2", 43), null, null)); + array.setOnce(0, testSearchPhaseResult1); + array.setOnce(1, testSearchPhaseResult2); + array.setOnce(2, testSearchPhaseResult3); + AtomicInteger numFreed = new AtomicInteger(0); + String scrollId = TransportSearchHelper.buildScrollId(array); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); + CountDownLatch latch = new CountDownLatch(1); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(ClearScrollResponse clearScrollResponse) { + try { + assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); + assertTrue(clearScrollResponse.isSucceeded()); + } finally { + latch.countDown(); + } + + } + + @Override + public void onFailure(Exception e) { + try { + throw new AssertionError(e); + } finally { + latch.countDown(); + } + } + }; + List nodesInvoked = new CopyOnWriteArrayList<>(); + SearchTransportService searchTransportService = new SearchTransportService(Settings.EMPTY, null) { + + @Override + public void sendFreeContext(Transport.Connection connection, long contextId, + ActionListener listener) { + nodesInvoked.add(connection.getNode()); + boolean freed = randomBoolean(); + if (freed) { + numFreed.incrementAndGet(); + } + Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(freed))); + t.start(); + } + + @Override + Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.scrollIds(Arrays.asList(scrollId)); + ClearScrollController controller = new ClearScrollController(clearScrollRequest, listener, + nodes, logger, searchTransportService); + controller.run(); + latch.await(); + assertEquals(3, nodesInvoked.size()); + Collections.sort(nodesInvoked, Comparator.comparing(DiscoveryNode::getId)); + assertEquals(nodesInvoked, Arrays.asList(node1, node2, node3)); + } + + public void testClearScrollIdsWithFailure() throws IOException, InterruptedException { + DiscoveryNode node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); + AtomicArray array = new AtomicArray<>(3); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult1 = new SearchAsyncActionTests.TestSearchPhaseResult(1, node1); + testSearchPhaseResult1.setSearchShardTarget(new SearchShardTarget("node_1", new ShardId("idx", "uuid1", 2), null, null)); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult2 = new 
SearchAsyncActionTests.TestSearchPhaseResult(12, node2); + testSearchPhaseResult2.setSearchShardTarget(new SearchShardTarget("node_2", new ShardId("idy", "uuid2", 42), null, null)); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult3 = new SearchAsyncActionTests.TestSearchPhaseResult(42, node3); + testSearchPhaseResult3.setSearchShardTarget(new SearchShardTarget("node_3", new ShardId("idy", "uuid2", 43), null, null)); + array.setOnce(0, testSearchPhaseResult1); + array.setOnce(1, testSearchPhaseResult2); + array.setOnce(2, testSearchPhaseResult3); + AtomicInteger numFreed = new AtomicInteger(0); + AtomicInteger numFailures = new AtomicInteger(0); + AtomicInteger numConnectionFailures = new AtomicInteger(0); + String scrollId = TransportSearchHelper.buildScrollId(array); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); + CountDownLatch latch = new CountDownLatch(1); + + ActionListener listener = new ActionListener() { + @Override + public void onResponse(ClearScrollResponse clearScrollResponse) { + try { + assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); + if (numFailures.get() > 0) { + assertFalse(clearScrollResponse.isSucceeded()); + } else { + assertTrue(clearScrollResponse.isSucceeded()); + } + + } finally { + latch.countDown(); + } + + } + + @Override + public void onFailure(Exception e) { + try { + throw new AssertionError(e); + } finally { + latch.countDown(); + } + } + }; + List nodesInvoked = new CopyOnWriteArrayList<>(); + SearchTransportService searchTransportService = new SearchTransportService(Settings.EMPTY, null) { + + @Override + public void sendFreeContext(Transport.Connection connection, long contextId, + ActionListener listener) { + nodesInvoked.add(connection.getNode()); + boolean freed = randomBoolean(); + boolean fail = randomBoolean(); + Thread t = new Thread(() -> { + if (fail) { + numFailures.incrementAndGet(); + listener.onFailure(new IllegalArgumentException("boom")); + } else { + if (freed) { + numFreed.incrementAndGet(); + } + listener.onResponse(new SearchFreeContextResponse(freed)); + } + }); + t.start(); + } + + @Override + Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + if (randomBoolean()) { + numFailures.incrementAndGet(); + numConnectionFailures.incrementAndGet(); + throw new NodeNotConnectedException(node, "boom"); + } + return new SearchAsyncActionTests.MockConnection(node); + } + }; + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.scrollIds(Arrays.asList(scrollId)); + ClearScrollController controller = new ClearScrollController(clearScrollRequest, listener, + nodes, logger, searchTransportService); + controller.run(); + latch.await(); + assertEquals(3 - numConnectionFailures.get(), nodesInvoked.size()); + } +} diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index 39890038f2a2a..878cb7e61266b 100644 --- a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -214,7 +214,7 @@ public void writeTo(StreamOutput out) throws IOException { } } - public final class MockConnection implements Transport.Connection { + public static final class MockConnection implements Transport.Connection { private final DiscoveryNode node; diff --git 
a/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index c92caef628a9b..9a1e78e298794 100644 --- a/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -148,29 +148,35 @@ public void testMerge() throws IOException { int nShards = randomIntBetween(1, 20); int queryResultSize = randomBoolean() ? 0 : randomIntBetween(1, nShards * 2); AtomicArray queryResults = generateQueryResults(nShards, suggestions, queryResultSize, false); - SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(queryResults.asList(), false); - AtomicArray searchPhaseResultAtomicArray = generateFetchResults(nShards, reducedQueryPhase.scoreDocs, - reducedQueryPhase.suggest); - InternalSearchResponse mergedResponse = searchPhaseController.merge(false, - reducedQueryPhase, - searchPhaseResultAtomicArray.asList(), searchPhaseResultAtomicArray::get); - int suggestSize = 0; - for (Suggest.Suggestion s : reducedQueryPhase.suggest) { - Stream stream = s.getEntries().stream(); - suggestSize += stream.collect(Collectors.summingInt(e -> e.getOptions().size())); - } - assertThat(suggestSize, lessThanOrEqualTo(maxSuggestSize)); - assertThat(mergedResponse.hits().getHits().length, equalTo(reducedQueryPhase.scoreDocs.length-suggestSize)); - Suggest suggestResult = mergedResponse.suggest(); - for (Suggest.Suggestion suggestion : reducedQueryPhase.suggest) { - assertThat(suggestion, instanceOf(CompletionSuggestion.class)); - if (suggestion.getEntries().get(0).getOptions().size() > 0) { - CompletionSuggestion suggestionResult = suggestResult.getSuggestion(suggestion.getName()); - assertNotNull(suggestionResult); - List options = suggestionResult.getEntries().get(0).getOptions(); - assertThat(options.size(), equalTo(suggestion.getEntries().get(0).getOptions().size())); - for (CompletionSuggestion.Entry.Option option : options) { - assertNotNull(option.getHit()); + for (boolean trackTotalHits : new boolean[] {true, false}) { + SearchPhaseController.ReducedQueryPhase reducedQueryPhase = + searchPhaseController.reducedQueryPhase(queryResults.asList(), false, trackTotalHits); + AtomicArray searchPhaseResultAtomicArray = generateFetchResults(nShards, reducedQueryPhase.scoreDocs, + reducedQueryPhase.suggest); + InternalSearchResponse mergedResponse = searchPhaseController.merge(false, + reducedQueryPhase, + searchPhaseResultAtomicArray.asList(), searchPhaseResultAtomicArray::get); + if (trackTotalHits == false) { + assertThat(mergedResponse.hits.totalHits, equalTo(-1L)); + } + int suggestSize = 0; + for (Suggest.Suggestion s : reducedQueryPhase.suggest) { + Stream stream = s.getEntries().stream(); + suggestSize += stream.collect(Collectors.summingInt(e -> e.getOptions().size())); + } + assertThat(suggestSize, lessThanOrEqualTo(maxSuggestSize)); + assertThat(mergedResponse.hits().getHits().length, equalTo(reducedQueryPhase.scoreDocs.length - suggestSize)); + Suggest suggestResult = mergedResponse.suggest(); + for (Suggest.Suggestion suggestion : reducedQueryPhase.suggest) { + assertThat(suggestion, instanceOf(CompletionSuggestion.class)); + if (suggestion.getEntries().get(0).getOptions().size() > 0) { + CompletionSuggestion suggestionResult = suggestResult.getSuggestion(suggestion.getName()); + assertNotNull(suggestionResult); + List options = 
suggestionResult.getEntries().get(0).getOptions(); + assertThat(options.size(), equalTo(suggestion.getEntries().get(0).getOptions().size())); + for (CompletionSuggestion.Entry.Option option : options) { + assertNotNull(option.getHit()); + } } } } diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java index 5cc92cb7d87b0..c91fd7377a5fc 100644 --- a/core/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java @@ -49,6 +49,7 @@ import java.util.List; import static java.util.Collections.singletonMap; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; public class SearchResponseTests extends ESTestCase { @@ -78,31 +79,71 @@ protected NamedXContentRegistry xContentRegistry() { } private SearchResponse createTestItem(ShardSearchFailure... shardSearchFailures) { - SearchHits hits = SearchHitsTests.createTestItem(); + return createTestItem(false, shardSearchFailures); + } + + /** + * This SearchResponse doesn't include SearchHits, Aggregations, Suggestions, ShardSearchFailures, SearchProfileShardResults + * to make it possible to only test properties of the SearchResponse itself + */ + private SearchResponse createMinimalTestItem() { + return createTestItem(true); + } + + /** + * if minimal is set, don't include search hits, aggregations, suggest etc... to make test simpler + */ + private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... shardSearchFailures) { boolean timedOut = randomBoolean(); Boolean terminatedEarly = randomBoolean() ? 
null : randomBoolean(); int numReducePhases = randomIntBetween(1, 10); long tookInMillis = randomNonNegativeLong(); int successfulShards = randomInt(); int totalShards = randomInt(); - - InternalAggregations aggregations = aggregationsTests.createTestInstance(); - Suggest suggest = SuggestTests.createTestItem(); - SearchProfileShardResults profileShardResults = SearchProfileShardResultsTests.createTestItem(); - - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, aggregations, suggest, profileShardResults, + InternalSearchResponse internalSearchResponse; + if (minimal == false) { + SearchHits hits = SearchHitsTests.createTestItem(); + InternalAggregations aggregations = aggregationsTests.createTestInstance(); + Suggest suggest = SuggestTests.createTestItem(); + SearchProfileShardResults profileShardResults = SearchProfileShardResultsTests.createTestItem(); + internalSearchResponse = new InternalSearchResponse(hits, aggregations, suggest, profileShardResults, timedOut, terminatedEarly, numReducePhases); + } else { + internalSearchResponse = InternalSearchResponse.empty(); + } return new SearchResponse(internalSearchResponse, null, totalShards, successfulShards, tookInMillis, shardSearchFailures); } + /** + * the "_shard/total/failures" section makes it impossible to directly + * compare xContent, so we omit it here + */ public void testFromXContent() throws IOException { - // the "_shard/total/failures" section makes if impossible to directly compare xContent, so we omit it here - SearchResponse response = createTestItem(); + doFromXContentTestWithRandomFields(createTestItem(), false); + } + + /** + * This test adds random fields and objects to the xContent rendered out to + * ensure we can parse it back to be forward compatible with additions to + * the xContent. We test this with a "minimal" SearchResponse, adding random + * fields to SearchHits, Aggregations etc... 
is tested in their own tests + */ + public void testFromXContentWithRandomFields() throws IOException { + doFromXContentTestWithRandomFields(createMinimalTestItem(), true); + } + + private void doFromXContentTestWithRandomFields(SearchResponse response, boolean addRandomFields) throws IOException { XContentType xcontentType = randomFrom(XContentType.values()); boolean humanReadable = randomBoolean(); final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true")); BytesReference originalBytes = toShuffledXContent(response, xcontentType, params, humanReadable); - try (XContentParser parser = createParser(xcontentType.xContent(), originalBytes)) { + BytesReference mutated; + if (addRandomFields) { + mutated = insertRandomFields(xcontentType, originalBytes, null, random()); + } else { + mutated = originalBytes; + } + try (XContentParser parser = createParser(xcontentType.xContent(), mutated)) { SearchResponse parsed = SearchResponse.fromXContent(parser); assertToXContentEquivalent(originalBytes, XContentHelper.toXContent(parsed, xcontentType, params, humanReadable), xcontentType); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); diff --git a/core/src/test/java/org/elasticsearch/action/search/ShardSearchFailureTests.java b/core/src/test/java/org/elasticsearch/action/search/ShardSearchFailureTests.java index 3c88551acbf73..9a8c0b1feb1d3 100644 --- a/core/src/test/java/org/elasticsearch/action/search/ShardSearchFailureTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/ShardSearchFailureTests.java @@ -33,6 +33,7 @@ import java.io.IOException; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; public class ShardSearchFailureTests extends ESTestCase { @@ -48,13 +49,31 @@ public static ShardSearchFailure createTestItem() { } public void testFromXContent() throws IOException { + doFromXContentTestWithRandomFields(false); + } + + /** + * This test adds random fields and objects to the xContent rendered out to + * ensure we can parse it back to be forward compatible with additions to + * the xContent + */ + public void testFromXContentWithRandomFields() throws IOException { + doFromXContentTestWithRandomFields(true); + } + + private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { ShardSearchFailure response = createTestItem(); XContentType xContentType = randomFrom(XContentType.values()); boolean humanReadable = randomBoolean(); BytesReference originalBytes = toShuffledXContent(response, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); - + BytesReference mutated; + if (addRandomFields) { + mutated = insertRandomFields(xContentType, originalBytes, null, random()); + } else { + mutated = originalBytes; + } ShardSearchFailure parsed; - try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); parsed = ShardSearchFailure.fromXContent(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); @@ -64,8 +83,11 @@ public void testFromXContent() throws IOException { assertEquals(response.shard().getNodeId(), parsed.shard().getNodeId()); assertEquals(response.shardId(), parsed.shardId()); - // we cannot compare the cause, because it will be wrapped in an outer ElasticSearchException - // 
best effort: try to check that the original message appears somewhere in the rendered xContent + /** + * we cannot compare the cause, because it will be wrapped in an outer + * ElasticsearchException. Best effort: try to check that the original + * message appears somewhere in the rendered xContent. + */ String originalMsg = response.getCause().getMessage(); assertEquals(parsed.getCause().getMessage(), "Elasticsearch exception [type=parsing_exception, reason=" + originalMsg + "]"); String nestedMsg = response.getCause().getCause().getMessage(); diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index e872d3d854ecc..8b389d69d382b 100644 --- a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -159,7 +159,7 @@ public void testFromXContent() throws Exception { // simple verbose script request.fromXContent(createParser(XContentFactory.jsonBuilder().startObject() - .startObject("script").field("inline", "script1").endObject() + .startObject("script").field("source", "script1").endObject() .endObject())); script = request.script(); assertThat(script, notNullValue()); @@ -173,7 +173,7 @@ public void testFromXContent() throws Exception { request = new UpdateRequest("test", "type", "1"); request.fromXContent(createParser(XContentFactory.jsonBuilder().startObject() .startObject("script") - .field("inline", "script1") + .field("source", "script1") .startObject("params") .field("param1", "value1") .endObject() @@ -195,7 +195,7 @@ public void testFromXContent() throws Exception { .startObject("params") .field("param1", "value1") .endObject() - .field("inline", "script1") + .field("source", "script1") .endObject() .endObject())); script = request.script(); @@ -215,7 +215,7 @@ public void testFromXContent() throws Exception { .startObject("params") .field("param1", "value1") .endObject() - .field("inline", "script1") + .field("source", "script1") .endObject() .startObject("upsert") .field("field1", "value1") @@ -249,7 +249,7 @@ public void testFromXContent() throws Exception { .startObject("params") .field("param1", "value1") .endObject() - .field("inline", "script1") + .field("source", "script1") .endObject().endObject())); script = request.script(); assertThat(script, notNullValue()); diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java index 5de9c211a6c45..9526b5b97e6eb 100644 --- a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.aliases; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse; @@ -32,6 +33,7 @@ import org.elasticsearch.cluster.metadata.AliasOrIndex; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.StopWatch; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; @@ -49,6 +51,7 @@ import java.util.Arrays; import java.util.HashSet; +import
java.util.List; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -567,20 +570,24 @@ public void testIndicesGetAliases() throws Exception { logger.info("--> getting alias1"); GetAliasesResponse getResponse = admin().indices().prepareGetAliases("alias1").get(); assertThat(getResponse, notNullValue()); - assertThat(getResponse.getAliases().size(), equalTo(1)); + assertThat(getResponse.getAliases().size(), equalTo(5)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("alias1")); assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); + assertTrue(getResponse.getAliases().get("test").isEmpty()); + assertTrue(getResponse.getAliases().get("test123").isEmpty()); + assertTrue(getResponse.getAliases().get("foobarbaz").isEmpty()); + assertTrue(getResponse.getAliases().get("bazbar").isEmpty()); AliasesExistResponse existsResponse = admin().indices().prepareAliasesExist("alias1").get(); assertThat(existsResponse.exists(), equalTo(true)); logger.info("--> getting all aliases that start with alias*"); getResponse = admin().indices().prepareGetAliases("alias*").get(); assertThat(getResponse, notNullValue()); - assertThat(getResponse.getAliases().size(), equalTo(1)); + assertThat(getResponse.getAliases().size(), equalTo(5)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(2)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("alias1")); @@ -592,6 +599,10 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(1).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(1).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(1).getSearchRouting(), nullValue()); + assertTrue(getResponse.getAliases().get("test").isEmpty()); + assertTrue(getResponse.getAliases().get("test123").isEmpty()); + assertTrue(getResponse.getAliases().get("foobarbaz").isEmpty()); + assertTrue(getResponse.getAliases().get("bazbar").isEmpty()); existsResponse = admin().indices().prepareAliasesExist("alias*").get(); assertThat(existsResponse.exists(), equalTo(true)); @@ -676,12 +687,13 @@ public void testIndicesGetAliases() throws Exception { logger.info("--> getting f* for index *bar"); getResponse = admin().indices().prepareGetAliases("f*").addIndices("*bar").get(); assertThat(getResponse, notNullValue()); - assertThat(getResponse.getAliases().size(), equalTo(1)); + assertThat(getResponse.getAliases().size(), equalTo(2)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo")); assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); + assertTrue(getResponse.getAliases().get("bazbar").isEmpty()); existsResponse = admin().indices().prepareAliasesExist("f*") 
.addIndices("*bar").get(); assertThat(existsResponse.exists(), equalTo(true)); @@ -690,13 +702,14 @@ public void testIndicesGetAliases() throws Exception { logger.info("--> getting f* for index *bac"); getResponse = admin().indices().prepareGetAliases("foo").addIndices("*bac").get(); assertThat(getResponse, notNullValue()); - assertThat(getResponse.getAliases().size(), equalTo(1)); + assertThat(getResponse.getAliases().size(), equalTo(2)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo")); assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); + assertTrue(getResponse.getAliases().get("bazbar").isEmpty()); existsResponse = admin().indices().prepareAliasesExist("foo") .addIndices("*bac").get(); assertThat(existsResponse.exists(), equalTo(true)); @@ -729,7 +742,9 @@ public void testIndicesGetAliases() throws Exception { .removeAlias("foobar", "foo")); getResponse = admin().indices().prepareGetAliases("foo").addIndices("foobar").get(); - assertThat(getResponse.getAliases().isEmpty(), equalTo(true)); + for (final ObjectObjectCursor> entry : getResponse.getAliases()) { + assertTrue(entry.value.isEmpty()); + } existsResponse = admin().indices().prepareAliasesExist("foo").addIndices("foobar").get(); assertThat(existsResponse.exists(), equalTo(false)); } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 5aa6b904f819d..21dd76b67e6e5 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -176,7 +176,8 @@ void unloadIndex(String indexName) throws Exception { public void testAllVersionsTested() throws Exception { SortedSet expectedVersions = new TreeSet<>(); for (Version v : VersionUtils.allReleasedVersions()) { - if (VersionUtils.isSnapshot(v)) continue; // snapshots are unreleased, so there is no backcompat yet + // The current version is in the "released" list even though it isn't released for historical reasons + if (v == Version.CURRENT) continue; if (v.isRelease() == false) continue; // no guarantees for prereleases if (v.before(Version.CURRENT.minimumIndexCompatibilityVersion())) continue; // we can only support one major version backward if (v.equals(Version.CURRENT)) continue; // the current version is always compatible with itself diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java index 394f09120d344..9ee8fa654b28f 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java @@ -89,7 +89,8 @@ public void testRestoreOldSnapshots() throws Exception { SortedSet expectedVersions = new TreeSet<>(); for (Version v : VersionUtils.allReleasedVersions()) { - if (VersionUtils.isSnapshot(v)) continue; // snapshots are unreleased, so there is no backcompat yet + // The current version is in the "released" list even though it isn't 
released for historical reasons + if (v == Version.CURRENT) continue; if (v.isRelease() == false) continue; // no guarantees for prereleases if (v.before(Version.CURRENT.minimumIndexCompatibilityVersion())) continue; // we only support versions N and N-1 if (v.equals(Version.CURRENT)) continue; // the current version is always compatible with itself diff --git a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index a1132647c7eea..753aedea01e02 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -377,7 +377,7 @@ public void testAsSequentialAccessBits() throws Exception { Weight termWeight = new TermQuery(new Term("foo", "bar")).createWeight(searcher, false, 1f); assertEquals(1, reader.leaves().size()); LeafReaderContext leafReaderContext = searcher.getIndexReader().leaves().get(0); - Bits bits = Lucene.asSequentialAccessBits(leafReaderContext.reader().maxDoc(), termWeight.scorer(leafReaderContext)); + Bits bits = Lucene.asSequentialAccessBits(leafReaderContext.reader().maxDoc(), termWeight.scorerSupplier(leafReaderContext)); expectThrows(IndexOutOfBoundsException.class, () -> bits.get(-1)); expectThrows(IndexOutOfBoundsException.class, () -> bits.get(leafReaderContext.reader().maxDoc())); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index bb9ec29f1ada9..16e746a67f7bd 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -224,12 +224,12 @@ public void setUp() throws Exception { codecName = "default"; } defaultSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() - .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us - .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(), - between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY))) - .build()); // TODO randomize more settings + .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(), + between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY))) + .build()); // TODO randomize more settings threadPool = new TestThreadPool(getClass().getName()); store = createStore(); storeReplica = createStore(); @@ -272,14 +272,14 @@ public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode, An public void tearDown() throws Exception { super.tearDown(); IOUtils.close( - replicaEngine, storeReplica, - engine, store); + replicaEngine, storeReplica, + engine, store); terminate(threadPool); } private static Document testDocumentWithTextField() { - return testDocumentWithTextField("test"); + return testDocumentWithTextField("test"); } private static Document testDocumentWithTextField(String value) { @@ -319,6 +319,7 @@ protected Store createStore() throws IOException { protected Store createStore(final Directory directory) throws IOException { return 
createStore(INDEX_SETTINGS, directory); } + protected Store createStore(final IndexSettings indexSettings, final Directory directory) throws IOException { final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { @Override @@ -351,6 +352,7 @@ protected InternalEngine createEngine(IndexSettings indexSettings, Store store, return createEngine(indexSettings, store, translogPath, mergePolicy, null); } + protected InternalEngine createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, @Nullable IndexWriterFactory indexWriterFactory) throws IOException { return createEngine(indexSettings, store, translogPath, mergePolicy, indexWriterFactory, null); @@ -392,12 +394,12 @@ public static InternalEngine createInternalEngine(@Nullable final IndexWriterFac @Nullable final Function sequenceNumbersServiceSupplier, final EngineConfig config) { return new InternalEngine(config) { - @Override - IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException { - return (indexWriterFactory != null) ? - indexWriterFactory.createWriter(directory, iwc) : - super.createWriter(directory, iwc); - } + @Override + IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException { + return (indexWriterFactory != null) ? + indexWriterFactory.createWriter(directory, iwc) : + super.createWriter(directory, iwc); + } @Override public SequenceNumbersService seqNoService() { @@ -436,9 +438,9 @@ public void onFailedEngine(String reason, @Nullable Exception e) { final List refreshListenerList = refreshListener == null ? emptyList() : Collections.singletonList(refreshListener); EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, - mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, - IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, - TimeValue.timeValueMinutes(5), refreshListenerList, indexSort, handler); + mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, + IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, + TimeValue.timeValueMinutes(5), refreshListenerList, indexSort, handler); return config; } @@ -454,7 +456,7 @@ private static BytesArray bytesArray(String string) { public void testSegments() throws Exception { try (Store store = createStore(); - Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { + Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { List segments = engine.segments(false); assertThat(segments.isEmpty(), equalTo(true)); assertThat(engine.segmentsStats(false).getCount(), equalTo(0L)); @@ -603,7 +605,7 @@ public void testVerboseSegments() throws Exception { public void testSegmentsWithMergeFlag() throws Exception { try (Store store = createStore(); - Engine engine = createEngine(defaultSettings, store, createTempDir(), new TieredMergePolicy())) { + Engine engine = createEngine(defaultSettings, store, createTempDir(), new TieredMergePolicy())) { ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null); Engine.Index index = indexForDoc(doc); engine.index(index); @@ -686,7 +688,7 @@ public void testSegmentsWithIndexSort() throws Exception { public void testSegmentsStatsIncludingFileSizes() throws Exception { try (Store store = createStore(); - Engine 
engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { + Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { assertThat(engine.segmentsStats(true).getFileSizes().size(), equalTo(0)); ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); @@ -1162,7 +1164,7 @@ public void testCommitAdvancesMinTranslogForRecovery() throws IOException { public void testSyncedFlush() throws IOException { try (Store store = createStore(); - Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), new LogByteSizeMergePolicy(), null))) { + Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), new LogByteSizeMergePolicy(), null))) { final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); engine.index(indexForDoc(doc)); @@ -1172,13 +1174,13 @@ public void testSyncedFlush() throws IOException { wrongBytes[0] = (byte) ~wrongBytes[0]; Engine.CommitId wrongId = new Engine.CommitId(wrongBytes); assertEquals("should fail to sync flush with wrong id (but no docs)", engine.syncFlush(syncId + "1", wrongId), - Engine.SyncedFlushResult.COMMIT_MISMATCH); + Engine.SyncedFlushResult.COMMIT_MISMATCH); engine.index(indexForDoc(doc)); assertEquals("should fail to sync flush with right id but pending doc", engine.syncFlush(syncId + "2", commitID), - Engine.SyncedFlushResult.PENDING_OPERATIONS); + Engine.SyncedFlushResult.PENDING_OPERATIONS); commitID = engine.flush(); assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID), - Engine.SyncedFlushResult.SUCCESS); + Engine.SyncedFlushResult.SUCCESS); assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); } @@ -1189,7 +1191,7 @@ public void testRenewSyncFlush() throws Exception { for (int i = 0; i < iters; i++) { try (Store store = createStore(); InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), - new LogDocMergePolicy(), null))) { + new LogDocMergePolicy(), null))) { final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); Engine.Index doc1 = indexForDoc(testParsedDocument("1", null, testDocumentWithTextField(), B_1, null)); engine.index(doc1); @@ -1208,7 +1210,7 @@ public void testRenewSyncFlush() throws Exception { } Engine.CommitId commitID = engine.flush(); assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID), - Engine.SyncedFlushResult.SUCCESS); + Engine.SyncedFlushResult.SUCCESS); assertEquals(3, engine.segments(false).size()); engine.forceMerge(forceMergeFlushes, 1, false, false, false); @@ -1248,7 +1250,7 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException { engine.index(indexForDoc(doc)); final Engine.CommitId commitID = engine.flush(); assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID), - Engine.SyncedFlushResult.SUCCESS); + Engine.SyncedFlushResult.SUCCESS); assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); EngineConfig config = engine.config(); @@ 
-1271,7 +1273,7 @@ public void testSyncedFlushVanishesOnReplay() throws IOException { engine.index(indexForDoc(doc)); final Engine.CommitId commitID = engine.flush(); assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID), - Engine.SyncedFlushResult.SUCCESS); + Engine.SyncedFlushResult.SUCCESS); assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); doc = testParsedDocument("2", null, testDocumentWithTextField(), new BytesArray("{}"), null); @@ -1307,8 +1309,8 @@ public void testVersioningNewIndex() throws IOException { public void testForceMerge() throws IOException { try (Store store = createStore(); - Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), - new LogByteSizeMergePolicy(), null))) { // use log MP here we test some behavior in ESMP + Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), + new LogByteSizeMergePolicy(), null))) { // use log MP here we test some behavior in ESMP int numDocs = randomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null); @@ -1422,7 +1424,7 @@ protected List generateSingleDocHistory(boolean forReplica, Ve final Term id = newUid("1"); final int startWithSeqNo; if (partialOldPrimary) { - startWithSeqNo = randomBoolean() ? numOfOps - 1 : randomIntBetween(0, numOfOps - 1); + startWithSeqNo = randomBoolean() ? numOfOps - 1 : randomIntBetween(0, numOfOps - 1); } else { startWithSeqNo = 0; } @@ -1541,7 +1543,8 @@ private void assertOpsOnReplica(List ops, InternalEngine repli } if (randomBoolean()) { engine.refresh("test"); - } if (randomBoolean()) { + } + if (randomBoolean()) { engine.flush(); } firstOp = false; @@ -1598,9 +1601,9 @@ private void concurrentlyApplyOps(List ops, InternalEngine eng try { final Engine.Operation op = ops.get(docOffset); if (op instanceof Engine.Index) { - engine.index((Engine.Index)op); + engine.index((Engine.Index) op); } else { - engine.delete((Engine.Delete)op); + engine.delete((Engine.Delete) op); } if ((docOffset + 1) % 4 == 0) { engine.refresh("test"); @@ -1641,7 +1644,7 @@ private int assertOpsOnPrimary(List ops, long currentOpVersion final long correctVersion = docDeleted && randomBoolean() ? Versions.MATCH_DELETED : lastOpVersion; logger.info("performing [{}]{}{}", op.operationType().name().charAt(0), - versionConflict ? " (conflict " + conflictingVersion +")" : "", + versionConflict ? " (conflict " + conflictingVersion + ")" : "", versionedOp ? 
" (versioned " + correctVersion + ")" : ""); if (op instanceof Engine.Index) { final Engine.Index index = (Engine.Index) op; @@ -1811,7 +1814,7 @@ public void testVersioningPromotedReplica() throws IOException { assertOpsOnReplica(replicaOps, replicaEngine, true); final int opsOnPrimary = assertOpsOnPrimary(primaryOps, finalReplicaVersion, deletedOnReplica, replicaEngine); final long currentSeqNo = getSequenceID(replicaEngine, - new Engine.Get(false, "type", lastReplicaOp.uid().text(), lastReplicaOp.uid())).v1(); + new Engine.Get(false, "type", lastReplicaOp.uid().text(), lastReplicaOp.uid())).v1(); try (Searcher searcher = engine.acquireSearcher("test")) { final TotalHitCountCollector collector = new TotalHitCountCollector(); searcher.searcher().search(new MatchAllDocsQuery(), collector); @@ -1936,7 +1939,7 @@ public void testBasicCreatedFlag() throws IOException { indexResult = engine.index(index); assertFalse(indexResult.isCreated()); - engine.delete(new Engine.Delete(null, "1", newUid(doc))); + engine.delete(new Engine.Delete("doc", "1", newUid(doc))); index = indexForDoc(doc); indexResult = engine.index(index); @@ -2169,11 +2172,11 @@ public void testConcurrentWritesAndCommits() throws Exception { final IndexCommit commit = commitRef.getIndexCommit(); Map userData = commit.getUserData(); long localCheckpoint = userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) ? - Long.parseLong(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) : - SequenceNumbersService.NO_OPS_PERFORMED; + Long.parseLong(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) : + SequenceNumbersService.NO_OPS_PERFORMED; long maxSeqNo = userData.containsKey(SequenceNumbers.MAX_SEQ_NO) ? - Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO)) : - SequenceNumbersService.UNASSIGNED_SEQ_NO; + Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO)) : + SequenceNumbersService.UNASSIGNED_SEQ_NO; // local checkpoint and max seq no shouldn't go backwards assertThat(localCheckpoint, greaterThanOrEqualTo(prevLocalCheckpoint)); assertThat(maxSeqNo, greaterThanOrEqualTo(prevMaxSeqNo)); @@ -2192,7 +2195,7 @@ public void testConcurrentWritesAndCommits() throws Exception { FixedBitSet seqNosBitSet = getSeqNosSet(reader, highestSeqNo); for (int i = 0; i <= localCheckpoint; i++) { assertTrue("local checkpoint [" + localCheckpoint + "], _seq_no [" + i + "] should be indexed", - seqNosBitSet.get(i)); + seqNosBitSet.get(i)); } } prevLocalCheckpoint = localCheckpoint; @@ -2268,7 +2271,7 @@ public void testIndexWriterIFDInfoStream() throws IllegalAccessException, IOExce public void testEnableGcDeletes() throws Exception { try (Store store = createStore(); - Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), null))) { + Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), null))) { engine.config().setEnableGcDeletes(false); final Function searcherFactory = engine::acquireSearcher; @@ -2341,7 +2344,7 @@ private Engine.Index indexForDoc(ParsedDocument doc) { private Engine.Index replicaIndexForDoc(ParsedDocument doc, long version, long seqNo, boolean isRetry) { - return new Engine.Index(newUid(doc), doc, seqNo, 1, version, VersionType.EXTERNAL, + return new Engine.Index(newUid(doc), doc, seqNo, 1, version, VersionType.EXTERNAL, Engine.Operation.Origin.REPLICA, System.nanoTime(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, isRetry); } @@ -2484,6 +2487,38 @@ private static void assertVisibleCount(InternalEngine engine, int numDocs, boole 
} } + public void testTranslogCleanUpPostCommitCrash() throws Exception { + try (Store store = createStore()) { + AtomicBoolean throwErrorOnCommit = new AtomicBoolean(); + final Path translogPath = createTempDir(); + try (InternalEngine engine = new InternalEngine(config(defaultSettings, store, translogPath, newMergePolicy(), null, null)) { + @Override + protected void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException { + super.commitIndexWriter(writer, translog, syncId); + if (throwErrorOnCommit.get()) { + throw new RuntimeException("power's out"); + } + } + }) { + final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); + engine.index(indexForDoc(doc1)); + throwErrorOnCommit.set(true); + FlushFailedEngineException e = expectThrows(FlushFailedEngineException.class, engine::flush); + assertThat(e.getCause().getMessage(), equalTo("power's out")); + } + try (InternalEngine engine = new InternalEngine(config(defaultSettings, store, translogPath, newMergePolicy(), null, null))) { + engine.recoverFromTranslog(); + assertVisibleCount(engine, 1); + final long committedGen = Long.valueOf( + engine.getLastCommittedSegmentInfos().getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); + for (int gen = 1; gen < committedGen; gen++) { + final Path genFile = translogPath.resolve(Translog.getFilename(gen)); + assertFalse(genFile + " wasn't cleaned up", Files.exists(genFile)); + } + } + } + } + public void testSkipTranslogReplay() throws IOException { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java index dfb3a3c1b3ec2..64dcf0a0943b9 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java @@ -19,11 +19,9 @@ package org.elasticsearch.index.fielddata; -import com.carrotsearch.hppc.ObjectArrayList; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -31,8 +29,8 @@ import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; +import java.util.ArrayList; import java.util.List; -import static org.hamcrest.Matchers.equalTo; public class BinaryDVFieldDataTests extends AbstractFieldDataTestCase { @Override @@ -53,15 +51,21 @@ public void testDocValue() throws Exception { final DocumentMapper mapper = mapperService.documentMapperParser().parse("test", new CompressedXContent(mapping)); - ObjectArrayList bytesList1 = new ObjectArrayList<>(2); + List bytesList1 = new ArrayList<>(2); bytesList1.add(randomBytes()); bytesList1.add(randomBytes()); - XContentBuilder doc = XContentFactory.jsonBuilder().startObject().startArray("field").value(bytesList1.get(0)).value(bytesList1.get(1)).endArray().endObject(); - ParsedDocument d = mapper.parse(SourceToParse.source("test", "test", "1", - doc.bytes(), XContentType.JSON)); + XContentBuilder doc = XContentFactory.jsonBuilder().startObject(); + { + doc.startArray("field"); + 
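// Two random binary values go into the array; the JSON builder renders them as base64 strings.
// Binary doc values store each document's values sorted and deduplicated, which is why the
// expected lists are sorted (bytesList1.sort(null)) before the assertions further down.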
doc.value(bytesList1.get(0)); + doc.value(bytesList1.get(1)); + doc.endArray(); + } + doc.endObject(); + ParsedDocument d = mapper.parse(SourceToParse.source("test", "test", "1", doc.bytes(), XContentType.JSON)); writer.addDocument(d.rootDoc()); - byte[] bytes1 = randomBytes(); + BytesRef bytes1 = randomBytes(); doc = XContentFactory.jsonBuilder().startObject().field("field", bytes1).endObject(); d = mapper.parse(SourceToParse.source("test", "test", "2", doc.bytes(), XContentType.JSON)); writer.addDocument(d.rootDoc()); @@ -71,45 +75,75 @@ public void testDocValue() throws Exception { writer.addDocument(d.rootDoc()); // test remove duplicate value - ObjectArrayList bytesList2 = new ObjectArrayList<>(2); + List bytesList2 = new ArrayList<>(2); bytesList2.add(randomBytes()); bytesList2.add(randomBytes()); - doc = XContentFactory.jsonBuilder().startObject().startArray("field").value(bytesList2.get(0)).value(bytesList2.get(1)).value(bytesList2.get(0)).endArray().endObject(); + doc = XContentFactory.jsonBuilder().startObject(); + { + doc.startArray("field"); + doc.value(bytesList2.get(0)); + doc.value(bytesList2.get(1)); + doc.value(bytesList2.get(0)); + doc.endArray(); + } + doc.endObject(); d = mapper.parse(SourceToParse.source("test", "test", "4", doc.bytes(), XContentType.JSON)); writer.addDocument(d.rootDoc()); - List readers = refreshReader(); IndexFieldData indexFieldData = getForField("field"); - for (LeafReaderContext reader : readers) { - AtomicFieldData fieldData = indexFieldData.load(reader); - - SortedBinaryDocValues bytesValues = fieldData.getBytesValues(); - - CollectionUtils.sortAndDedup(bytesList1); - assertTrue(bytesValues.advanceExact(0)); - assertThat(bytesValues.docValueCount(), equalTo(2)); - assertThat(bytesValues.nextValue(), equalTo(new BytesRef(bytesList1.get(0)))); - assertThat(bytesValues.nextValue(), equalTo(new BytesRef(bytesList1.get(1)))); - - assertTrue(bytesValues.advanceExact(1)); - assertThat(bytesValues.docValueCount(), equalTo(1)); - assertThat(bytesValues.nextValue(), equalTo(new BytesRef(bytes1))); - - assertFalse(bytesValues.advanceExact(2)); - - CollectionUtils.sortAndDedup(bytesList2); - assertTrue(bytesValues.advanceExact(3)); - assertThat(bytesValues.docValueCount(), equalTo(2)); - assertThat(bytesValues.nextValue(), equalTo(new BytesRef(bytesList2.get(0)))); - assertThat(bytesValues.nextValue(), equalTo(new BytesRef(bytesList2.get(1)))); - } + List readers = refreshReader(); + assertEquals(1, readers.size()); + LeafReaderContext reader = readers.get(0); + + bytesList1.sort(null); + bytesList2.sort(null); + + // Test SortedBinaryDocValues's decoding: + AtomicFieldData fieldData = indexFieldData.load(reader); + SortedBinaryDocValues bytesValues = fieldData.getBytesValues(); + + assertTrue(bytesValues.advanceExact(0)); + assertEquals(2, bytesValues.docValueCount()); + assertEquals(bytesList1.get(0), bytesValues.nextValue()); + assertEquals(bytesList1.get(1), bytesValues.nextValue()); + + assertTrue(bytesValues.advanceExact(1)); + assertEquals(1, bytesValues.docValueCount()); + assertEquals(bytes1, bytesValues.nextValue()); + + assertFalse(bytesValues.advanceExact(2)); + + assertTrue(bytesValues.advanceExact(3)); + assertEquals(2, bytesValues.docValueCount()); + assertEquals(bytesList2.get(0), bytesValues.nextValue()); + assertEquals(bytesList2.get(1), bytesValues.nextValue()); + + // Test whether ScriptDocValues.BytesRefs makes a deepcopy + fieldData = indexFieldData.load(reader); + ScriptDocValues scriptValues = fieldData.getScriptValues(); + 
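// If the implementation only wrapped Lucene's shared, mutable BytesRef, advancing to the next
// document would clobber values handed out for the previous one; the per-document get(i) checks
// below therefore pass only if ScriptDocValues.BytesRefs deep-copies each value it returns.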
scriptValues.setNextDocId(0); + assertEquals(2, scriptValues.size()); + assertEquals(bytesList1.get(0), scriptValues.get(0)); + assertEquals(bytesList1.get(1), scriptValues.get(1)); + + scriptValues.setNextDocId(1); + assertEquals(1, scriptValues.size()); + assertEquals(bytes1, scriptValues.get(0)); + + scriptValues.setNextDocId(2); + assertEquals(0, scriptValues.size()); + + scriptValues.setNextDocId(3); + assertEquals(2, scriptValues.size()); + assertEquals(bytesList2.get(0), scriptValues.get(0)); + assertEquals(bytesList2.get(1), scriptValues.get(1)); } - private byte[] randomBytes() { + private static BytesRef randomBytes() { int size = randomIntBetween(10, 1000); byte[] bytes = new byte[size]; random().nextBytes(bytes); - return bytes; + return new BytesRef(bytes); } @Override diff --git a/core/src/test/java/org/elasticsearch/index/mapper/AllFieldIT.java b/core/src/test/java/org/elasticsearch/index/mapper/AllFieldIT.java new file mode 100644 index 0000000000000..2be58b3b68e6b --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/mapper/AllFieldIT.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.Version; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; + +import java.util.Arrays; +import java.util.Collection; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; + +public class AllFieldIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created + } + + public void test5xIndicesContinueToUseAll() throws Exception { + // Default 5.x settings + assertAcked(prepareCreate("test").setSettings("index.version.created", Version.V_5_1_1.id)); + client().prepareIndex("test", "type", "1").setSource("body", "foo").get(); + refresh(); + SearchResponse resp = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("_all", "foo")).get(); + assertHitCount(resp, 1); + assertSearchHits(resp, "1"); + + // _all explicitly enabled + assertAcked(prepareCreate("test2") + .setSource(jsonBuilder() + .startObject() + .startObject("mappings") + .startObject("type") + .startObject("_all") + .field("enabled", true) + .endObject() // _all + .endObject() // type + .endObject() // mappings + .endObject()) + .setSettings("index.version.created", Version.V_5_4_0_ID)); + client().prepareIndex("test2", "type", "1").setSource("foo", "bar").get(); + refresh(); + resp = client().prepareSearch("test2").setQuery(QueryBuilders.matchQuery("_all", "bar")).get(); + assertHitCount(resp, 1); + assertSearchHits(resp, "1"); + + // _all explicitly disabled + assertAcked(prepareCreate("test3") + .setSource(jsonBuilder() + .startObject() + .startObject("mappings") + .startObject("type") + .startObject("_all") + .field("enabled", false) + .endObject() // _all + .endObject() // type + .endObject() // mappings + .endObject()) + .setSettings("index.version.created", Version.V_5_4_0_ID)); + client().prepareIndex("test3", "type", "1").setSource("foo", "baz").get(); + refresh(); + resp = client().prepareSearch("test3").setQuery(QueryBuilders.matchQuery("_all", "baz")).get(); + assertHitCount(resp, 0); + + // _all present, but not enabled or disabled (default settings) + assertAcked(prepareCreate("test4") + .setSource(jsonBuilder() + .startObject() + .startObject("mappings") + .startObject("type") + .startObject("_all") + .endObject() // _all + .endObject() // type + .endObject() // mappings + .endObject()) + .setSettings("index.version.created", Version.V_5_4_0_ID)); + client().prepareIndex("test4", "type", "1").setSource("foo", "eggplant").get(); + refresh(); + resp = client().prepareSearch("test4").setQuery(QueryBuilders.matchQuery("_all", "eggplant")).get(); + assertHitCount(resp, 1); + assertSearchHits(resp, "1"); + } + +} diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 49864768edf8b..d3d099672bac3 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; @@ -148,6 +149,92 @@ public void testDotsWithDynamicNestedMapper() throws Exception { e.getMessage()); } + public void testNestedHaveIdAndTypeFields() throws Exception { + DocumentMapperParser mapperParser1 = createIndex("index1", Settings.builder() + .put("index.mapping.single_type", false).build() + ).mapperService().documentMapperParser(); + DocumentMapperParser mapperParser2 = createIndex("index2", Settings.builder() + .put("index.mapping.single_type", true).build() + ).mapperService().documentMapperParser(); + + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties"); + { + mapping.startObject("foo"); + mapping.field("type", "nested"); + { + mapping.startObject("properties"); + { + + mapping.startObject("bar"); + mapping.field("type", "keyword"); + mapping.endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + } + { + mapping.startObject("baz"); + mapping.field("type", "keyword"); + mapping.endObject(); + } + mapping.endObject().endObject().endObject(); + DocumentMapper mapper1 = mapperParser1.parse("type", new CompressedXContent(mapping.string())); + DocumentMapper mapper2 = mapperParser2.parse("type", new CompressedXContent(mapping.string())); + + XContentBuilder doc = XContentFactory.jsonBuilder().startObject(); + { + doc.startArray("foo"); + { + doc.startObject(); + doc.field("bar", "value1"); + doc.endObject(); + } + doc.endArray(); + doc.field("baz", "value2"); + } + doc.endObject(); + + // Verify in the case where multiple types are allowed that the _uid field is added to nested documents: + ParsedDocument result = mapper1.parse(SourceToParse.source("index1", "type", "1", doc.bytes(), XContentType.JSON)); + assertEquals(2, result.docs().size()); + // Nested document: + assertNull(result.docs().get(0).getField(IdFieldMapper.NAME)); + assertNotNull(result.docs().get(0).getField(UidFieldMapper.NAME)); + assertEquals("type#1", result.docs().get(0).getField(UidFieldMapper.NAME).stringValue()); + assertEquals(UidFieldMapper.Defaults.NESTED_FIELD_TYPE, result.docs().get(0).getField(UidFieldMapper.NAME).fieldType()); + assertNotNull(result.docs().get(0).getField(TypeFieldMapper.NAME)); + assertEquals("__foo", result.docs().get(0).getField(TypeFieldMapper.NAME).stringValue()); + assertEquals("value1", result.docs().get(0).getField("foo.bar").binaryValue().utf8ToString()); + // Root document: + assertNull(result.docs().get(1).getField(IdFieldMapper.NAME)); + assertNotNull(result.docs().get(1).getField(UidFieldMapper.NAME)); + assertEquals("type#1", result.docs().get(1).getField(UidFieldMapper.NAME).stringValue()); + assertEquals(UidFieldMapper.Defaults.FIELD_TYPE, result.docs().get(1).getField(UidFieldMapper.NAME).fieldType()); + assertNotNull(result.docs().get(1).getField(TypeFieldMapper.NAME)); + assertEquals("type", result.docs().get(1).getField(TypeFieldMapper.NAME).stringValue()); + assertEquals("value2", result.docs().get(1).getField("baz").binaryValue().utf8ToString()); + + // Verify in the case where only a single type is allowed that the 
_id field is added to nested documents: + result = mapper2.parse(SourceToParse.source("index2", "type", "1", doc.bytes(), XContentType.JSON)); + assertEquals(2, result.docs().size()); + // Nested document: + assertNull(result.docs().get(0).getField(UidFieldMapper.NAME)); + assertNotNull(result.docs().get(0).getField(IdFieldMapper.NAME)); + assertEquals("1", result.docs().get(0).getField(IdFieldMapper.NAME).stringValue()); + assertEquals(IdFieldMapper.Defaults.NESTED_FIELD_TYPE, result.docs().get(0).getField(IdFieldMapper.NAME).fieldType()); + assertNotNull(result.docs().get(0).getField(TypeFieldMapper.NAME)); + assertEquals("__foo", result.docs().get(0).getField(TypeFieldMapper.NAME).stringValue()); + assertEquals("value1", result.docs().get(0).getField("foo.bar").binaryValue().utf8ToString()); + // Root document: + assertNull(result.docs().get(1).getField(UidFieldMapper.NAME)); + assertNotNull(result.docs().get(1).getField(IdFieldMapper.NAME)); + assertEquals("1", result.docs().get(1).getField(IdFieldMapper.NAME).stringValue()); + assertEquals(IdFieldMapper.Defaults.FIELD_TYPE, result.docs().get(1).getField(IdFieldMapper.NAME).fieldType()); + assertNull(result.docs().get(1).getField(TypeFieldMapper.NAME)); + assertEquals("value2", result.docs().get(1).getField("baz").binaryValue().utf8ToString()); + } + public void testPropagateDynamicWithExistingMapper() throws Exception { DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser(); String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") @@ -639,7 +726,7 @@ public void testDynamicDottedFieldNameLongArrayWithExistingParentWrongType() thr .value(0) .value(1) .endArray().endObject().bytes(); - MapperParsingException exception = expectThrows(MapperParsingException.class, + MapperParsingException exception = expectThrows(MapperParsingException.class, () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals("Could not dynamically add mapping for field [foo.bar.baz]. " + "Existing mapping for [foo] must be of type object but found [long].", exception.getMessage()); @@ -758,7 +845,7 @@ public void testDynamicDottedFieldNameLongWithExistingParentWrongType() throws E BytesReference bytes = XContentFactory.jsonBuilder() .startObject().field("foo.bar.baz", 0) .endObject().bytes(); - MapperParsingException exception = expectThrows(MapperParsingException.class, + MapperParsingException exception = expectThrows(MapperParsingException.class, () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals("Could not dynamically add mapping for field [foo.bar.baz]. " + "Existing mapping for [foo] must be of type object but found [long].", exception.getMessage()); @@ -880,7 +967,7 @@ public void testDynamicDottedFieldNameObjectWithExistingParentWrongType() throws BytesReference bytes = XContentFactory.jsonBuilder().startObject().startObject("foo.bar.baz").field("a", 0).endObject().endObject() .bytes(); - MapperParsingException exception = expectThrows(MapperParsingException.class, + MapperParsingException exception = expectThrows(MapperParsingException.class, () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals("Could not dynamically add mapping for field [foo.bar.baz]. 
" @@ -1017,7 +1104,7 @@ public void testNoLevel() throws Exception { .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject() - .bytes(), + .bytes(), XContentType.JSON)); assertThat(doc.rootDoc().get("test1"), equalTo("value1")); @@ -1036,7 +1123,7 @@ public void testTypeLevel() throws Exception { .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject().endObject() - .bytes(), + .bytes(), XContentType.JSON)); assertThat(doc.rootDoc().get("type.test1"), equalTo("value1")); @@ -1056,7 +1143,7 @@ public void testNoLevelWithFieldTypeAsValue() throws Exception { .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject() - .bytes(), + .bytes(), XContentType.JSON)); assertThat(doc.rootDoc().get("type"), equalTo("value_type")); @@ -1077,7 +1164,7 @@ public void testTypeLevelWithFieldTypeAsValue() throws Exception { .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject().endObject() - .bytes(), + .bytes(), XContentType.JSON)); assertThat(doc.rootDoc().get("type.type"), equalTo("value_type")); @@ -1098,7 +1185,7 @@ public void testNoLevelWithFieldTypeAsObject() throws Exception { .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject() - .bytes(), + .bytes(), XContentType.JSON)); // in this case, we analyze the type object as the actual document, and ignore the other same level fields diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java index 0f976e12f39ed..861586370aef8 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java @@ -24,8 +24,10 @@ import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Token; import org.apache.lucene.analysis.TokenStream; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -144,4 +146,55 @@ public void testEmptyName() throws IOException { ); assertThat(e.getMessage(), containsString("name cannot be empty string")); } + + public void testParseNullValue() throws Exception { + DocumentMapper mapper = createIndexWithTokenCountField(); + ParseContext.Document doc = parseDocument(mapper, createDocument(null)); + assertNull(doc.getField("test.tc")); + } + + public void testParseEmptyValue() throws Exception { + DocumentMapper mapper = createIndexWithTokenCountField(); + ParseContext.Document doc = parseDocument(mapper, createDocument("")); + assertEquals(0, doc.getField("test.tc").numericValue()); + } + + public void testParseNotNullValue() throws Exception { + DocumentMapper mapper = createIndexWithTokenCountField(); + ParseContext.Document doc = parseDocument(mapper, createDocument("three tokens string")); + assertEquals(3, doc.getField("test.tc").numericValue()); + } + + private DocumentMapper createIndexWithTokenCountField() throws IOException { + final String content = XContentFactory.jsonBuilder().startObject() + 
.startObject("person") + .startObject("properties") + .startObject("test") + .field("type", "text") + .startObject("fields") + .startObject("tc") + .field("type", "token_count") + .field("analyzer", "standard") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject().endObject().string(); + + return createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(content)); + } + + private SourceToParse createDocument(String fieldValue) throws Exception { + BytesReference request = XContentFactory.jsonBuilder() + .startObject() + .field("test", fieldValue) + .endObject().bytes(); + + return SourceToParse.source("test", "person", "1", request, XContentType.JSON); + } + + private ParseContext.Document parseDocument(DocumentMapper mapper, SourceToParse request) { + return mapper.parse(request) + .docs().stream().findFirst().orElseThrow(() -> new IllegalStateException("Test object not parsed")); + } } diff --git a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index 85a368bf2b967..34adbdf25521a 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -47,6 +47,7 @@ import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertDisjunctionSubQuery; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.either; import static org.hamcrest.CoreMatchers.equalTo; @@ -183,14 +184,15 @@ public void testToQueryMultipleTermsBooleanQuery() throws Exception { assertThat(assertBooleanSubQuery(query, TermQuery.class, 1).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "test2"))); } - public void testToQueryMultipleFieldsBooleanQuery() throws Exception { + public void testToQueryMultipleFieldsDisableDismax() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); Query query = multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2).useDisMax(false).toQuery(createShardContext()); - assertThat(query, instanceOf(BooleanQuery.class)); - BooleanQuery bQuery = (BooleanQuery) query; - assertThat(bQuery.clauses().size(), equalTo(2)); - assertThat(assertBooleanSubQuery(query, TermQuery.class, 0).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "test"))); - assertThat(assertBooleanSubQuery(query, TermQuery.class, 1).getTerm(), equalTo(new Term(STRING_FIELD_NAME_2, "test"))); + assertThat(query, instanceOf(DisjunctionMaxQuery.class)); + DisjunctionMaxQuery dQuery = (DisjunctionMaxQuery) query; + assertThat(dQuery.getTieBreakerMultiplier(), equalTo(1.0f)); + assertThat(dQuery.getDisjuncts().size(), equalTo(2)); + assertThat(assertDisjunctionSubQuery(query, TermQuery.class, 0).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "test"))); + assertThat(assertDisjunctionSubQuery(query, TermQuery.class, 1).getTerm(), equalTo(new Term(STRING_FIELD_NAME_2, "test"))); } public void testToQueryMultipleFieldsDisMaxQuery() throws Exception { @@ -198,6 +200,7 @@ public void testToQueryMultipleFieldsDisMaxQuery() throws Exception { Query query = 
multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2).useDisMax(true).toQuery(createShardContext()); assertThat(query, instanceOf(DisjunctionMaxQuery.class)); DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) query; + assertThat(disMaxQuery.getTieBreakerMultiplier(), equalTo(0.0f)); List disjuncts = disMaxQuery.getDisjuncts(); assertThat(disjuncts.get(0), instanceOf(TermQuery.class)); assertThat(((TermQuery) disjuncts.get(0)).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "test"))); @@ -208,11 +211,12 @@ public void testToQueryMultipleFieldsDisMaxQuery() throws Exception { public void testToQueryFieldsWildcard() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); Query query = multiMatchQuery("test").field("mapped_str*").useDisMax(false).toQuery(createShardContext()); - assertThat(query, instanceOf(BooleanQuery.class)); - BooleanQuery bQuery = (BooleanQuery) query; - assertThat(bQuery.clauses().size(), equalTo(2)); - assertThat(assertBooleanSubQuery(query, TermQuery.class, 0).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "test"))); - assertThat(assertBooleanSubQuery(query, TermQuery.class, 1).getTerm(), equalTo(new Term(STRING_FIELD_NAME_2, "test"))); + assertThat(query, instanceOf(DisjunctionMaxQuery.class)); + DisjunctionMaxQuery dQuery = (DisjunctionMaxQuery) query; + assertThat(dQuery.getTieBreakerMultiplier(), equalTo(1.0f)); + assertThat(dQuery.getDisjuncts().size(), equalTo(2)); + assertThat(assertDisjunctionSubQuery(query, TermQuery.class, 0).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "test"))); + assertThat(assertDisjunctionSubQuery(query, TermQuery.class, 1).getTerm(), equalTo(new Term(STRING_FIELD_NAME_2, "test"))); } public void testToQueryFieldMissing() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index d7e4fac854404..ca3850c411829 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -46,6 +46,7 @@ import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.compress.CompressedXContent; @@ -61,11 +62,13 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertDisjunctionSubQuery; import static org.hamcrest.CoreMatchers.either; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsString; @@ -270,12 +273,12 @@ public void testToQueryMultipleFieldsBooleanQuery() throws Exception { .field(STRING_FIELD_NAME_2) .useDisMax(false) .toQuery(createShardContext()); - assertThat(query, instanceOf(BooleanQuery.class)); - BooleanQuery bQuery = (BooleanQuery) query; - assertThat(bQuery.clauses().size(), equalTo(2)); - assertThat(assertBooleanSubQuery(query, 
TermQuery.class, 0).getTerm(), + assertThat(query, instanceOf(DisjunctionMaxQuery.class)); + DisjunctionMaxQuery bQuery = (DisjunctionMaxQuery) query; + assertThat(bQuery.getDisjuncts().size(), equalTo(2)); + assertThat(assertDisjunctionSubQuery(query, TermQuery.class, 0).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "test"))); - assertThat(assertBooleanSubQuery(query, TermQuery.class, 1).getTerm(), + assertThat(assertDisjunctionSubQuery(query, TermQuery.class, 1).getTerm(), equalTo(new Term(STRING_FIELD_NAME_2, "test"))); } @@ -294,12 +297,12 @@ public void testToQueryMultipleFieldsDisMaxQuery() throws Exception { public void testToQueryFieldsWildcard() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); Query query = queryStringQuery("test").field("mapped_str*").useDisMax(false).toQuery(createShardContext()); - assertThat(query, instanceOf(BooleanQuery.class)); - BooleanQuery bQuery = (BooleanQuery) query; - assertThat(bQuery.clauses().size(), equalTo(2)); - assertThat(assertBooleanSubQuery(query, TermQuery.class, 0).getTerm(), + assertThat(query, instanceOf(DisjunctionMaxQuery.class)); + DisjunctionMaxQuery dQuery = (DisjunctionMaxQuery) query; + assertThat(dQuery.getDisjuncts().size(), equalTo(2)); + assertThat(assertDisjunctionSubQuery(query, TermQuery.class, 0).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "test"))); - assertThat(assertBooleanSubQuery(query, TermQuery.class, 1).getTerm(), + assertThat(assertDisjunctionSubQuery(query, TermQuery.class, 1).getTerm(), equalTo(new Term(STRING_FIELD_NAME_2, "test"))); } @@ -397,6 +400,7 @@ public void testToQueryWithGraph() throws Exception { // simple multi-term Query query = queryParser.parse("guinea pig"); + Query expectedQuery = new BooleanQuery.Builder() .add(new BooleanQuery.Builder() .add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST) @@ -448,34 +452,34 @@ public void testToQueryWithGraph() throws Exception { // span query query = queryParser.parse("\"that guinea pig smells\""); - expectedQuery = new BooleanQuery.Builder() - .add(new SpanNearQuery.Builder(STRING_FIELD_NAME, true) - .addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "that"))) - .addClause(new SpanOrQuery( + + SpanNearQuery nearQuery = new SpanNearQuery.Builder(STRING_FIELD_NAME, true) + .addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "that"))) + .addClause( + new SpanOrQuery( new SpanNearQuery.Builder(STRING_FIELD_NAME, true) .addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "guinea"))) .addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "pig"))).build(), new SpanTermQuery(new Term(STRING_FIELD_NAME, "cavy")))) .addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "smells"))) - .build(), Occur.SHOULD) - .build(); + .build(); + expectedQuery = new DisjunctionMaxQuery(Collections.singletonList(nearQuery), 1.0f); assertThat(query, Matchers.equalTo(expectedQuery)); // span query with slop query = queryParser.parse("\"that guinea pig smells\"~2"); - expectedQuery = new BooleanQuery.Builder() - .add(new SpanNearQuery.Builder(STRING_FIELD_NAME, true) - .addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "that"))) - .addClause(new SpanOrQuery( + nearQuery = new SpanNearQuery.Builder(STRING_FIELD_NAME, true) + .addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "that"))) + .addClause( + new SpanOrQuery( new SpanNearQuery.Builder(STRING_FIELD_NAME, true) .addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "guinea"))) .addClause(new SpanTermQuery(new 
Term(STRING_FIELD_NAME, "pig"))).build(), new SpanTermQuery(new Term(STRING_FIELD_NAME, "cavy")))) - .addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "smells"))) - .setSlop(2) - .build(), - Occur.SHOULD) + .addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "smells"))) + .setSlop(2) .build(); + expectedQuery = new DisjunctionMaxQuery(Collections.singletonList(nearQuery), 1.0f); assertThat(query, Matchers.equalTo(expectedQuery)); } } @@ -830,6 +834,9 @@ public void testToQuerySplitOnWhitespace() throws IOException { public void testExistsFieldQuery() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + assumeTrue("5.x behaves differently, so skip on non-6.x indices", + indexVersionCreated.onOrAfter(Version.V_6_0_0_alpha1)); + QueryShardContext context = createShardContext(); QueryStringQueryBuilder queryBuilder = new QueryStringQueryBuilder("foo:*"); Query query = queryBuilder.toQuery(context); diff --git a/core/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java index 3be16e27c48cd..d273825f9794c 100644 --- a/core/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java @@ -66,7 +66,7 @@ public void testFromJsonVerbose() throws IOException { "{\n" + " \"script\" : {\n" + " \"script\" : {\n" + - " \"inline\" : \"5\",\n" + + " \"source\" : \"5\",\n" + " \"lang\" : \"mockscript\"\n" + " },\n" + " \"boost\" : 1.0,\n" + diff --git a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index e08221a168c2a..f13e01e52ae95 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; @@ -30,6 +31,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.TestUtil; +import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParsingException; import org.elasticsearch.index.mapper.MapperService; @@ -47,6 +49,7 @@ import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -196,6 +199,9 @@ public void testFieldsCannotBeSetToNull() { } public void testDefaultFieldParsing() throws IOException { + assumeTrue("5.x behaves differently, so skip on non-6.x indices", + indexVersionCreated.onOrAfter(Version.V_6_0_0_alpha1)); + String query = randomAlphaOfLengthBetween(1, 10).toLowerCase(Locale.ROOT); String contentString = "{\n" + " \"simple_query_string\" : {\n" + @@ -211,7 +217,7 @@ public void testDefaultFieldParsing() throws IOException { // the remaining tests requires 
either a mapping that we register with types in base test setup if (getCurrentTypes().length > 0) { Query luceneQuery = queryBuilder.toQuery(shardContext); - assertThat(luceneQuery, instanceOf(BooleanQuery.class)); + assertThat(luceneQuery, anyOf(instanceOf(BooleanQuery.class), instanceOf(DisjunctionMaxQuery.class))); } } @@ -229,30 +235,39 @@ protected void doAssertLuceneQuery(SimpleQueryStringBuilder queryBuilder, Query if ("".equals(queryBuilder.value())) { assertThat(query, instanceOf(MatchNoDocsQuery.class)); } else if (queryBuilder.fields().size() > 1) { - assertThat(query, instanceOf(BooleanQuery.class)); - BooleanQuery boolQuery = (BooleanQuery) query; - for (BooleanClause clause : boolQuery.clauses()) { - if (clause.getQuery() instanceof TermQuery) { - TermQuery inner = (TermQuery) clause.getQuery(); - assertThat(inner.getTerm().bytes().toString(), is(inner.getTerm().bytes().toString().toLowerCase(Locale.ROOT))); + assertThat(query, anyOf(instanceOf(BooleanQuery.class), instanceOf(DisjunctionMaxQuery.class))); + if (query instanceof BooleanQuery) { + BooleanQuery boolQuery = (BooleanQuery) query; + for (BooleanClause clause : boolQuery.clauses()) { + if (clause.getQuery() instanceof TermQuery) { + TermQuery inner = (TermQuery) clause.getQuery(); + assertThat(inner.getTerm().bytes().toString(), is(inner.getTerm().bytes().toString().toLowerCase(Locale.ROOT))); + } } - } - assertThat(boolQuery.clauses().size(), equalTo(queryBuilder.fields().size())); - Iterator> fieldsIterator = queryBuilder.fields().entrySet().iterator(); - for (BooleanClause booleanClause : boolQuery) { - Map.Entry field = fieldsIterator.next(); - assertTermOrBoostQuery(booleanClause.getQuery(), field.getKey(), queryBuilder.value(), field.getValue()); - } - /** - * TODO: - * Test disabled because we cannot check min should match consistently: - * https://github.com/elastic/elasticsearch/issues/23966 - * - if (queryBuilder.minimumShouldMatch() != null && !boolQuery.isCoordDisabled()) { - assertThat(boolQuery.getMinimumNumberShouldMatch(), greaterThan(0)); + assertThat(boolQuery.clauses().size(), equalTo(queryBuilder.fields().size())); + Iterator> fieldsIterator = queryBuilder.fields().entrySet().iterator(); + for (BooleanClause booleanClause : boolQuery) { + Map.Entry field = fieldsIterator.next(); + assertTermOrBoostQuery(booleanClause.getQuery(), field.getKey(), queryBuilder.value(), field.getValue()); } - * - **/ + if (queryBuilder.minimumShouldMatch() != null) { + assertThat(boolQuery.getMinimumNumberShouldMatch(), greaterThan(0)); + } + } else if (query instanceof DisjunctionMaxQuery) { + DisjunctionMaxQuery maxQuery = (DisjunctionMaxQuery) query; + for (Query disjunct : maxQuery.getDisjuncts()) { + if (disjunct instanceof TermQuery) { + TermQuery inner = (TermQuery) disjunct; + assertThat(inner.getTerm().bytes().toString(), is(inner.getTerm().bytes().toString().toLowerCase(Locale.ROOT))); + } + } + assertThat(maxQuery.getDisjuncts().size(), equalTo(queryBuilder.fields().size())); + Iterator> fieldsIterator = queryBuilder.fields().entrySet().iterator(); + for (Query disjunct : maxQuery) { + Map.Entry field = fieldsIterator.next(); + assertTermOrBoostQuery(disjunct, field.getKey(), queryBuilder.value(), field.getValue()); + } + } } else if (queryBuilder.fields().size() == 1) { Map.Entry field = queryBuilder.fields().entrySet().iterator().next(); assertTermOrBoostQuery(query, field.getKey(), queryBuilder.value(), field.getValue()); @@ -261,7 +276,8 @@ protected void doAssertLuceneQuery(SimpleQueryStringBuilder 
queryBuilder, Query if (ms.allEnabled()) { assertTermQuery(query, MetaData.ALL, queryBuilder.value()); } else { - assertThat(query.getClass(), anyOf(equalTo(BooleanQuery.class), equalTo(MatchNoDocsQuery.class))); + assertThat(query.getClass(), + anyOf(equalTo(BooleanQuery.class), equalTo(DisjunctionMaxQuery.class), equalTo(MatchNoDocsQuery.class))); } } else { fail("Encountered lucene query type we do not have a validation implementation for in our " @@ -337,7 +353,6 @@ public void testFromJson() throws IOException { assertEquals(json, ".quote", parsed.quoteFieldSuffix()); } - @AwaitsFix(bugUrl = "Waiting on fix for minimumShouldMatch https://github.com/elastic/elasticsearch/issues/23966") public void testMinimumShouldMatch() throws IOException { QueryShardContext shardContext = createShardContext(); int numberOfTerms = randomIntBetween(1, 4); @@ -360,12 +375,13 @@ public void testMinimumShouldMatch() throws IOException { // check special case: one term & one field should get simplified to a TermQuery if (numberOfFields * numberOfTerms == 1) { assertThat(query, instanceOf(TermQuery.class)); + } else if (numberOfTerms == 1) { + assertThat(query, instanceOf(DisjunctionMaxQuery.class)); } else { assertThat(query, instanceOf(BooleanQuery.class)); BooleanQuery boolQuery = (BooleanQuery) query; int expectedMinimumShouldMatch = numberOfTerms * percent / 100; - if (numberOfTerms == 1 - || simpleQueryStringBuilder.defaultOperator().equals(Operator.AND)) { + if (simpleQueryStringBuilder.defaultOperator().equals(Operator.AND)) { expectedMinimumShouldMatch = 0; } assertEquals(expectedMinimumShouldMatch, boolQuery.getMinimumNumberShouldMatch()); diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 8e27ab5e9d392..72ace394d0119 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -105,7 +105,7 @@ protected IndexMetaData buildIndexMetaData(int replicas, Map map .build(); IndexMetaData.Builder metaData = IndexMetaData.builder(index.getName()) .settings(settings) - .primaryTerm(0, 1); + .primaryTerm(0, randomIntBetween(1, 100)); for (Map.Entry typeMapping : mappings.entrySet()) { metaData.putMapping(typeMapping.getKey(), typeMapping.getValue()); } diff --git a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 92837f25871ee..198b02e17c36e 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -239,7 +239,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { .source("{}", XContentType.JSON) ); assertTrue(response.isFailed()); - assertNoOpTranslogOperationForDocumentFailure(shards, 1, failureMessage); + assertNoOpTranslogOperationForDocumentFailure(shards, 1, shards.getPrimary().getPrimaryTerm(), failureMessage); shards.assertAllEqual(0); // add some replicas @@ -253,7 +253,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { .source("{}", XContentType.JSON) ); assertTrue(response.isFailed()); - assertNoOpTranslogOperationForDocumentFailure(shards, 2, failureMessage); + 
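The re-enabled `testMinimumShouldMatch` expectation is plain integer arithmetic over the generated SHOULD clauses. A worked example under the same assumptions as the test:

```java
// Worked example of the expectation computed in testMinimumShouldMatch above.
int numberOfTerms = 3;
int percent = 70; // a minimumShouldMatch of "70%"
int expectedMinimumShouldMatch = numberOfTerms * percent / 100; // 3 * 70 / 100 == 2 (integer division)
// ...and it drops to 0 when the default operator is AND, since every clause is then required anyway.
```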
assertNoOpTranslogOperationForDocumentFailure(shards, 2, shards.getPrimary().getPrimaryTerm(), failureMessage); shards.assertAllEqual(0); } } @@ -324,6 +324,7 @@ public long addDocument(Iterable doc) throws IOExcepti private static void assertNoOpTranslogOperationForDocumentFailure( Iterable replicationGroup, int expectedOperation, + long expectedPrimaryTerm, String failureMessage) throws IOException { for (IndexShard indexShard : replicationGroup) { try(Translog.View view = indexShard.acquireTranslogView()) { @@ -334,6 +335,7 @@ private static void assertNoOpTranslogOperationForDocumentFailure( do { assertThat(op.opType(), equalTo(Translog.Operation.Type.NO_OP)); assertThat(op.seqNo(), equalTo(expectedSeqNo)); + assertThat(op.primaryTerm(), equalTo(expectedPrimaryTerm)); assertThat(((Translog.NoOp) op).reason(), containsString(failureMessage)); op = snapshot.next(); expectedSeqNo++; diff --git a/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java index be6b0c3865aeb..a005d7009eab6 100644 --- a/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java @@ -93,13 +93,14 @@ public void testCrossFieldMultiMatchQuery() throws IOException { Query parsedQuery = multiMatchQuery("banon").field("name.first", 2).field("name.last", 3).field("foobar").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).toQuery(queryShardContext); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { Query rewrittenQuery = searcher.searcher().rewrite(parsedQuery); - - BooleanQuery.Builder expected = new BooleanQuery.Builder(); - expected.add(new TermQuery(new Term("foobar", "banon")), BooleanClause.Occur.SHOULD); Query tq1 = new BoostQuery(new TermQuery(new Term("name.first", "banon")), 2); Query tq2 = new BoostQuery(new TermQuery(new Term("name.last", "banon")), 3); - expected.add(new DisjunctionMaxQuery(Arrays.asList(tq1, tq2), 0f), BooleanClause.Occur.SHOULD); - assertEquals(expected.build(), rewrittenQuery); + Query expected = new DisjunctionMaxQuery( + Arrays.asList( + new TermQuery(new Term("foobar", "banon")), + new DisjunctionMaxQuery(Arrays.asList(tq1, tq2), 0f) + ), 0f); + assertEquals(expected, rewrittenQuery); } } @@ -110,7 +111,7 @@ public void testBlendTerms() { ft2.setName("bar"); Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") }; float[] boosts = new float[] {2, 3}; - Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts); + Query expected = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f); Query actual = MultiMatchQuery.blendTerm( indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }), new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); @@ -126,7 +127,7 @@ public void testBlendTermsWithFieldBoosts() { ft2.setBoost(10); Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") }; float[] boosts = new float[] {200, 30}; - Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts); + Query expected = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f); Query actual = MultiMatchQuery.blendTerm( indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }), new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); @@ -145,7 +146,7 
@@ public Query termQuery(Object value, QueryShardContext context) { ft2.setName("bar"); Term[] terms = new Term[] { new Term("foo", "baz") }; float[] boosts = new float[] {2}; - Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts); + Query expected = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f); Query actual = MultiMatchQuery.blendTerm( indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }), new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); @@ -164,12 +165,13 @@ public Query termQuery(Object value, QueryShardContext context) { ft2.setName("bar"); Term[] terms = new Term[] { new Term("foo", "baz") }; float[] boosts = new float[] {2}; - Query expectedClause1 = BlendedTermQuery.booleanBlendedQuery(terms, boosts); - Query expectedClause2 = new BoostQuery(new MatchAllDocsQuery(), 3); - Query expected = new BooleanQuery.Builder() - .add(expectedClause1, Occur.SHOULD) - .add(expectedClause2, Occur.SHOULD) - .build(); + Query expectedDisjunct1 = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f); + Query expectedDisjunct2 = new BoostQuery(new MatchAllDocsQuery(), 3); + Query expected = new DisjunctionMaxQuery( + Arrays.asList( + expectedDisjunct2, + expectedDisjunct1 + ), 1.0f); Query actual = MultiMatchQuery.blendTerm( indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }), new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 83b948fb4f0ab..7a6813864edb3 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1281,7 +1281,7 @@ public void testRecoverFromStoreWithNoOps() throws IOException { while((operation = snapshot.next()) != null) { if (operation.opType() == Translog.Operation.Type.NO_OP) { numNoops++; - assertEquals(1, operation.primaryTerm()); + assertEquals(newShard.getPrimaryTerm(), operation.primaryTerm()); assertEquals(0, operation.seqNo()); } } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index d52adf37d6e56..48299d0429114 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -104,12 +104,14 @@ import java.util.stream.Collectors; import java.util.stream.LongStream; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomLongBetween; import static org.elasticsearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.lessThanOrEqualTo; @LuceneTestCase.SuppressFileSystems("ExtrasFS") public class TranslogTests extends ESTestCase { @@ -141,7 +143,7 @@ protected Translog createTranslog(TranslogConfig config, String translogUUID) th return new Translog(config, translogUUID, new TranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); } - private void 
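The cross-fields expectations above now use Lucene's dismax-blended term query rather than the boolean-blended variant these tests previously asserted. A short sketch of the call the tests build their expected query from (terms and boosts mirror the test data; this is illustration, not part of the patch):

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.BlendedTermQuery;
import org.apache.lucene.search.Query;

class BlendedSketch {
    static Query expected() {
        Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") };
        float[] boosts = new float[] { 2f, 3f };
        // blends document frequencies across the two fields, then combines the
        // per-field term queries dis-max style with a tie breaker of 1.0
        return BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f);
    }
}
```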
markCurrentGenAsCommitted(Translog translog) { + private void markCurrentGenAsCommitted(Translog translog) throws IOException { commit(translog, translog.currentFileGeneration()); } @@ -150,9 +152,14 @@ private void rollAndCommit(Translog translog) throws IOException { commit(translog, translog.currentFileGeneration()); } - private void commit(Translog translog, long genToCommit) { - translog.getDeletionPolicy().setMinTranslogGenerationForRecovery(genToCommit); + private void commit(Translog translog, long genToCommit) throws IOException { + final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); + deletionPolicy.setMinTranslogGenerationForRecovery(genToCommit); translog.trimUnreferencedReaders(); + if (deletionPolicy.pendingViewsCount() == 0) { + assertThat(deletionPolicy.minTranslogGenRequired(), equalTo(genToCommit)); + } + assertThat(translog.getMinFileGeneration(), equalTo(deletionPolicy.minTranslogGenRequired())); } @Override @@ -347,24 +354,24 @@ public void testStats() throws IOException { { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(2L)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(139L)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(146L)); } translog.add(new Translog.Delete("test", "3", 2, newUid("3"))); { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(3L)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(181L)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(195L)); } translog.add(new Translog.NoOp(3, 1, randomAlphaOfLength(16))); { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(4L)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(223L)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(237L)); } - final long expectedSizeInBytes = 266L; + final long expectedSizeInBytes = 280L; translog.rollGeneration(); { final TranslogStats stats = stats(); @@ -484,7 +491,7 @@ public void testSnapshotOnClosedTranslog() throws IOException { } public void assertFileIsPresent(Translog translog, long id) { - if (Files.exists(translogDir.resolve(Translog.getFilename(id)))) { + if (Files.exists(translog.location().resolve(Translog.getFilename(id)))) { return; } fail(Translog.getFilename(id) + " is not present in any location: " + translog.location()); @@ -494,6 +501,15 @@ public void assertFileDeleted(Translog translog, long id) { assertFalse("translog [" + id + "] still exists", Files.exists(translog.location().resolve(Translog.getFilename(id)))); } + private void assertFilePresences(Translog translog) { + for (long gen = translog.getMinFileGeneration(); gen < translog.currentFileGeneration(); gen++) { + assertFileIsPresent(translog, gen); + } + for (long gen = 1; gen < translog.getMinFileGeneration(); gen++) { + assertFileDeleted(translog, gen); + } + } + static class LocationOperation implements Comparable { final Translog.Operation operation; final Translog.Location location; @@ -1015,7 +1031,7 @@ public void testBasicCheckpoint() throws IOException { } public void testTranslogWriter() throws IOException { - final TranslogWriter writer = translog.createWriter(0); + final TranslogWriter writer = translog.createWriter(translog.currentFileGeneration() + 1); final int numOps = randomIntBetween(8, 128); byte[] bytes = new byte[4]; ByteArrayDataOutput out = new ByteArrayDataOutput(bytes); @@ -1075,7 +1091,7 @@ public void testTranslogWriter() throws IOException { } public void testCloseIntoReader() throws 
IOException { - try (TranslogWriter writer = translog.createWriter(0)) { + try (TranslogWriter writer = translog.createWriter(translog.currentFileGeneration() + 1)) { final int numOps = randomIntBetween(8, 128); final byte[] bytes = new byte[4]; final ByteArrayDataOutput out = new ByteArrayDataOutput(bytes); @@ -1270,7 +1286,7 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { TranslogConfig config = translog.getConfig(); Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME); Checkpoint read = Checkpoint.read(ckp); - Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0, SequenceNumbersService.UNASSIGNED_SEQ_NO); + Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0); Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); @@ -1278,8 +1294,8 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { fail("corrupted"); } catch (IllegalStateException ex) { assertEquals("Checkpoint file translog-2.ckp already exists but has corrupted content expected: Checkpoint{offset=3123, " + - "numOps=55, generation=2, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-2} but got: Checkpoint{offset=0, numOps=0, " + - "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-2}", ex.getMessage()); + "numOps=55, generation=2, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-2, minTranslogGeneration=1} but got: Checkpoint{offset=0, numOps=0, " + + "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-2, minTranslogGeneration=0}", ex.getMessage()); } Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { @@ -1699,6 +1715,91 @@ protected void afterAdd() throws IOException { } } + /** + * Tests the situation where the node crashes after a translog gen was committed to lucene, but before the translog had the chance + * to clean up its files. 
+ */ + public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { + int translogOperations = randomIntBetween(10, 100); + for (int op = 0; op < translogOperations / 2; op++) { + translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + if (rarely()) { + translog.rollGeneration(); + } + } + translog.rollGeneration(); + long committedGeneration = randomLongBetween(2, translog.currentFileGeneration()); + for (int op = translogOperations / 2; op < translogOperations; op++) { + translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + if (rarely()) { + translog.rollGeneration(); + } + } + // engine blows up after committing the above generation + translog.close(); + TranslogConfig config = translog.getConfig(); + final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(); + deletionPolicy.setMinTranslogGenerationForRecovery(committedGeneration); + translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + assertThat(translog.getMinFileGeneration(), equalTo(1L)); + // no trimming done yet, just recovered + for (long gen = 1; gen < translog.currentFileGeneration(); gen++) { + assertFileIsPresent(translog, gen); + } + translog.trimUnreferencedReaders(); + for (long gen = 1; gen < committedGeneration; gen++) { + assertFileDeleted(translog, gen); + } + } + + /** + * Tests the situation where a failure occurs while the translog is trimming its unreferenced files, and verifies that a subsequent + * recovery can still open the translog and complete the trimming. + */ + public void testRecoveryFromFailureOnTrimming() throws IOException { + Path tempDir = createTempDir(); + final FailSwitch fail = new FailSwitch(); + fail.failNever(); + final TranslogConfig config = getTranslogConfig(tempDir); + final long committedGeneration; + final String translogUUID; + try (Translog translog = getFailableTranslog(fail, config)) { + translogUUID = translog.getTranslogUUID(); + int translogOperations = randomIntBetween(10, 100); + for (int op = 0; op < translogOperations / 2; op++) { + translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + if (rarely()) { + translog.rollGeneration(); + } + } + translog.rollGeneration(); + committedGeneration = randomLongBetween(2, translog.currentFileGeneration()); + for (int op = translogOperations / 2; op < translogOperations; op++) { + translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + if (rarely()) { + translog.rollGeneration(); + } + } + fail.failRandomly(); + try { + commit(translog, committedGeneration); + } catch (Exception e) { + // expected... 
+ } + } + final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(); + deletionPolicy.setMinTranslogGenerationForRecovery(committedGeneration); + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { + // we don't know when things broke exactly + assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L)); + assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(committedGeneration)); + assertFilePresences(translog); + translog.trimUnreferencedReaders(); + assertThat(translog.getMinFileGeneration(), equalTo(committedGeneration)); + assertFilePresences(translog); + } + } + private Translog getFailableTranslog(FailSwitch fail, final TranslogConfig config) throws IOException { return getFailableTranslog(fail, config, randomBoolean(), false, null, new TranslogDeletionPolicy()); } @@ -1756,6 +1857,16 @@ ChannelFactory getChannelFactory() { } }; } + + @Override + void deleteReaderFiles(TranslogReader reader) { + if (fail.fail()) { + // simulate going OOM and dying just at the wrong moment. + throw new RuntimeException("simulated"); + } else { + super.deleteReaderFiles(reader); + } + } }; } @@ -1953,6 +2064,7 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { * that we can, after we hit an exception, open and recover the translog successfully and retrieve all successfully synced operations * from the transaction log. */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/25133") public void testWithRandomException() throws IOException { final int runs = randomIntBetween(5, 10); for (int run = 0; run < runs; run++) { @@ -2054,7 +2166,9 @@ private Checkpoint randomCheckpoint() { minSeqNo = b; maxSeqNo = a; } - return new Checkpoint(randomLong(), randomInt(), randomLong(), minSeqNo, maxSeqNo, randomNonNegativeLong()); + final long generation = randomNonNegativeLong(); + return new Checkpoint(randomLong(), randomInt(), generation, minSeqNo, maxSeqNo, randomNonNegativeLong(), + randomLongBetween(1, generation)); } public void testCheckpointOnDiskFull() throws IOException { @@ -2150,6 +2264,20 @@ public void testTranslogOpSerialization() throws Exception { in = out.bytes().streamInput(); Translog.Delete serializedDelete = new Translog.Delete(in); assertEquals(delete, serializedDelete); + + // simulate legacy delete serialization + out = new BytesStreamOutput(); + out.writeVInt(Translog.Delete.FORMAT_5_0); + out.writeString(UidFieldMapper.NAME); + out.writeString("my_type#my_id"); + out.writeLong(3); // version + out.writeByte(VersionType.INTERNAL.getValue()); + out.writeLong(2); // seq no + out.writeLong(0); // primary term + in = out.bytes().streamInput(); + serializedDelete = new Translog.Delete(in); + assertEquals("my_type", serializedDelete.type()); + assertEquals("my_id", serializedDelete.id()); } public void testRollGeneration() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java index d008749506161..f6aafe765f56f 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java @@ -89,7 +89,7 @@ public TranslogReader openReader(final Path path, final long id) throws IOExcept final long minSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; final long maxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; 
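The checkpoint-related changes above all add one field: each checkpoint now records the minimum translog generation that must be retained for recovery. A sketch of the extended shape, with values taken from the corrupted-checkpoint error message asserted earlier and the argument order inferred from this diff's `randomCheckpoint()` (the inline names are annotations, not the class's actual parameter names):

```java
// Extended translog checkpoint shape, as exercised by the tests above.
Checkpoint checkpoint = new Checkpoint(
        3123L, // offset
        55,    // numOps
        2L,    // generation
        45L,   // minSeqNo
        99L,   // maxSeqNo
        -2L,   // globalCheckpoint (UNASSIGNED_SEQ_NO)
        1L);   // minTranslogGeneration -- the new field
```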
final Checkpoint checkpoint = - new Checkpoint(Files.size(path), 1, id, minSeqNo, maxSeqNo, SequenceNumbersService.UNASSIGNED_SEQ_NO); + new Checkpoint(Files.size(path), 1, id, minSeqNo, maxSeqNo, SequenceNumbersService.UNASSIGNED_SEQ_NO, id); return TranslogReader.open(channel, path, checkpoint, null); } } diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 4f0fec4c85e52..a2e678585844f 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -116,7 +116,7 @@ private Path writeTranslog( generation, resolve, FileChannel::open, - TranslogConfig.DEFAULT_BUFFER_SIZE, () -> globalCheckpoint)) {} + TranslogConfig.DEFAULT_BUFFER_SIZE, () -> globalCheckpoint, generation, () -> generation)) {} return tempDir; } diff --git a/core/src/test/java/org/elasticsearch/ingest/ValueSourceTests.java b/core/src/test/java/org/elasticsearch/ingest/ValueSourceTests.java index 0ac353b4ae806..72238d3b59656 100644 --- a/core/src/test/java/org/elasticsearch/ingest/ValueSourceTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/ValueSourceTests.java @@ -49,7 +49,7 @@ public void testCopyDoesNotChangeProvidedMap() { myPreciousMap.put("field2", "value2"); IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>()); - ingestDocument.setFieldValue(TestTemplateService.instance().compile("field1"), + ingestDocument.setFieldValue(new TestTemplateService.MockTemplateScript.Factory("field1"), ValueSource.wrap(myPreciousMap, TestTemplateService.instance())); ingestDocument.removeField("field1.field2"); @@ -62,7 +62,7 @@ public void testCopyDoesNotChangeProvidedList() { myPreciousList.add("value"); IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>()); - ingestDocument.setFieldValue(TestTemplateService.instance().compile("field1"), + ingestDocument.setFieldValue(new TestTemplateService.MockTemplateScript.Factory("field1"), ValueSource.wrap(myPreciousList, TestTemplateService.instance())); ingestDocument.removeField("field1.0"); diff --git a/core/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java b/core/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java index fb1e0885a5f9a..f1315b9cdc49a 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java @@ -56,11 +56,11 @@ public void testGetScript() throws Exception { builder.storeScript("any", StoredScriptSource.parse("lang", sourceBuilder.bytes(), sourceBuilder.contentType())); ScriptMetaData scriptMetaData = builder.build(); - assertEquals("{\"field\":\"value\"}", scriptMetaData.getStoredScript("template", "lang").getCode()); - assertEquals("value", scriptMetaData.getStoredScript("template_field", "lang").getCode()); - assertEquals("{\"field\":\"value\"}", scriptMetaData.getStoredScript("script", "lang").getCode()); - assertEquals("value", scriptMetaData.getStoredScript("script_field", "lang").getCode()); - assertEquals("{\"field\":\"value\"}", scriptMetaData.getStoredScript("any", "lang").getCode()); + assertEquals("{\"field\":\"value\"}", scriptMetaData.getStoredScript("template", "lang").getSource()); + assertEquals("value", scriptMetaData.getStoredScript("template_field", "lang").getSource()); + 
assertEquals("{\"field\":\"value\"}", scriptMetaData.getStoredScript("script", "lang").getSource()); + assertEquals("value", scriptMetaData.getStoredScript("script_field", "lang").getSource()); + assertEquals("{\"field\":\"value\"}", scriptMetaData.getStoredScript("any", "lang").getSource()); } public void testDiff() throws Exception { @@ -85,9 +85,9 @@ public void testDiff() throws Exception { assertNotNull(((DiffableUtils.MapDiff) diff.pipelines).getUpserts().get("4")); ScriptMetaData result = (ScriptMetaData) diff.apply(scriptMetaData1); - assertEquals("{\"foo\":\"abc\"}", result.getStoredScript("1", "lang").getCode()); - assertEquals("{\"foo\":\"changed\"}", result.getStoredScript("2", "lang").getCode()); - assertEquals("{\"foo\":\"jkl\"}", result.getStoredScript("4", "lang").getCode()); + assertEquals("{\"foo\":\"abc\"}", result.getStoredScript("1", "lang").getSource()); + assertEquals("{\"foo\":\"changed\"}", result.getStoredScript("2", "lang").getSource()); + assertEquals("{\"foo\":\"jkl\"}", result.getStoredScript("4", "lang").getSource()); } public void testBuilder() { @@ -95,7 +95,7 @@ public void testBuilder() { builder.storeScript("_id", StoredScriptSource.parse("_lang", new BytesArray("{\"script\":\"1 + 1\"}"), XContentType.JSON)); ScriptMetaData result = builder.build(); - assertEquals("1 + 1", result.getStoredScript("_id", "_lang").getCode()); + assertEquals("1 + 1", result.getStoredScript("_id", "_lang").getSource()); } private ScriptMetaData randomScriptMetaData(XContentType sourceContentType) throws IOException { diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index af1b0dc3d01c3..9a81b1bcbbde7 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -240,7 +240,7 @@ public void testStoreScript() throws Exception { ScriptMetaData scriptMetaData = ScriptMetaData.putStoredScript(null, "_id", StoredScriptSource.parse("_lang", script, XContentType.JSON)); assertNotNull(scriptMetaData); - assertEquals("abc", scriptMetaData.getStoredScript("_id", "_lang").getCode()); + assertEquals("abc", scriptMetaData.getStoredScript("_id", "_lang").getSource()); } public void testDeleteScript() throws Exception { @@ -266,7 +266,7 @@ public void testGetStoredScript() throws Exception { StoredScriptSource.parse("_lang", new BytesArray("{\"script\":\"abc\"}"), XContentType.JSON)).build())) .build(); - assertEquals("abc", scriptService.getStoredScript(cs, new GetStoredScriptRequest("_id", "_lang")).getCode()); + assertEquals("abc", scriptService.getStoredScript(cs, new GetStoredScriptRequest("_id", "_lang")).getSource()); assertNull(scriptService.getStoredScript(cs, new GetStoredScriptRequest("_id2", "_lang"))); cs = ClusterState.builder(new ClusterName("_name")).build(); diff --git a/core/src/test/java/org/elasticsearch/script/StoredScriptTests.java b/core/src/test/java/org/elasticsearch/script/StoredScriptTests.java index af54afbf77dee..6c2a0caf7208b 100644 --- a/core/src/test/java/org/elasticsearch/script/StoredScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/StoredScriptTests.java @@ -248,6 +248,16 @@ public void testSourceParsing() throws Exception { } // complex script with script object + try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + builder.startObject().field("script").startObject().field("lang", "lang").field("source", 
"code").endObject().endObject(); + + StoredScriptSource parsed = StoredScriptSource.parse(null, builder.bytes(), XContentType.JSON); + StoredScriptSource source = new StoredScriptSource("lang", "code", Collections.emptyMap()); + + assertThat(parsed, equalTo(source)); + } + + // complex script using "code" backcompat try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { builder.startObject().field("script").startObject().field("lang", "lang").field("code", "code").endObject().endObject(); @@ -256,10 +266,11 @@ public void testSourceParsing() throws Exception { assertThat(parsed, equalTo(source)); } + assertWarnings("Deprecated field [code] used, expected [source] instead"); // complex script with script object and empty options try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { - builder.startObject().field("script").startObject().field("lang", "lang").field("code", "code") + builder.startObject().field("script").startObject().field("lang", "lang").field("source", "code") .field("options").startObject().endObject().endObject().endObject(); StoredScriptSource parsed = StoredScriptSource.parse(null, builder.bytes(), XContentType.JSON); @@ -270,7 +281,7 @@ public void testSourceParsing() throws Exception { // complex script with embedded template try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { - builder.startObject().field("script").startObject().field("lang", "lang").startObject("code").field("query", "code") + builder.startObject().field("script").startObject().field("lang", "lang").startObject("source").field("query", "code") .endObject().startObject("options").endObject().endObject().endObject().string(); String code; @@ -298,25 +309,25 @@ public void testSourceParsingErrors() throws Exception { // check for missing lang parameter when parsing a script try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { - builder.startObject().field("script").startObject().field("code", "code").endObject().endObject(); + builder.startObject().field("script").startObject().field("source", "code").endObject().endObject(); IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> StoredScriptSource.parse(null, builder.bytes(), XContentType.JSON)); assertThat(iae.getMessage(), equalTo("must specify lang for stored script")); } - // check for missing code parameter when parsing a script + // check for missing source parameter when parsing a script try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { builder.startObject().field("script").startObject().field("lang", "lang").endObject().endObject(); IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> StoredScriptSource.parse(null, builder.bytes(), XContentType.JSON)); - assertThat(iae.getMessage(), equalTo("must specify code for stored script")); + assertThat(iae.getMessage(), equalTo("must specify source for stored script")); } // check for illegal options parameter when parsing a script try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { - builder.startObject().field("script").startObject().field("lang", "lang").field("code", "code") + builder.startObject().field("script").startObject().field("lang", "lang").field("source", "code") .startObject("options").field("option", "option").endObject().endObject().endObject(); IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> diff --git 
a/core/src/test/java/org/elasticsearch/script/StoredScriptsIT.java b/core/src/test/java/org/elasticsearch/script/StoredScriptsIT.java index 63945cd03a681..80a8f4deaa741 100644 --- a/core/src/test/java/org/elasticsearch/script/StoredScriptsIT.java +++ b/core/src/test/java/org/elasticsearch/script/StoredScriptsIT.java @@ -55,7 +55,7 @@ public void testBasics() { .setId("foobar") .setContent(new BytesArray("{\"script\":\"1\"}"), XContentType.JSON)); String script = client().admin().cluster().prepareGetStoredScript(LANG, "foobar") - .get().getSource().getCode(); + .get().getSource().getSource(); assertNotNull(script); assertEquals("1", script); diff --git a/core/src/test/java/org/elasticsearch/search/SearchHitsTests.java b/core/src/test/java/org/elasticsearch/search/SearchHitsTests.java index c25eb7da81454..decfe804a4284 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchHitsTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchHitsTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.search; +import org.apache.lucene.util.TestUtil; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ToXContent; @@ -44,7 +45,7 @@ public static SearchHits createTestItem() { for (int i = 0; i < searchHits; i++) { hits[i] = SearchHitTests.createTestItem(false); // creating random innerHits could create loops } - long totalHits = randomLong(); + long totalHits = frequently() ? TestUtil.nextLong(random(), 0, Long.MAX_VALUE) : -1; float maxScore = frequently() ? randomFloat() : Float.NaN; return new SearchHits(hits, totalHits, maxScore); } diff --git a/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java index 85b13974042e0..4b053b1968f8c 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -56,7 +56,7 @@ import org.elasticsearch.search.fetch.subphase.highlight.FastVectorHighlighter; import org.elasticsearch.search.fetch.subphase.highlight.Highlighter; import org.elasticsearch.search.fetch.subphase.highlight.PlainHighlighter; -import org.elasticsearch.search.fetch.subphase.highlight.PostingsHighlighter; +import org.elasticsearch.search.fetch.subphase.highlight.UnifiedHighlighter; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.suggest.CustomSuggesterSearchIT.CustomSuggestionBuilder; import org.elasticsearch.search.suggest.SuggestionBuilder; @@ -204,7 +204,7 @@ public Map getHighlighters() { Map highlighters = module.getHighlighters(); assertEquals(FastVectorHighlighter.class, highlighters.get("fvh").getClass()); assertEquals(PlainHighlighter.class, highlighters.get("plain").getClass()); - assertEquals(PostingsHighlighter.class, highlighters.get("postings").getClass()); + assertEquals(UnifiedHighlighter.class, highlighters.get("unified").getClass()); assertSame(highlighters.get("custom"), customHighlighter); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 563fac1ba7df7..a90960c2ec944 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -852,7 +852,7 @@ public void testTopHitsInSecondLayerNested() throws Exception { } public 
void testNestedFetchFeatures() { - String hlType = randomFrom("plain", "fvh", "postings"); + String hlType = randomFrom("plain", "fvh", "unified"); HighlightBuilder.Field hlField = new HighlightBuilder.Field("comments.message") .highlightQuery(matchQuery("comments.message", "comment")) .forceSource(randomBoolean()) // randomly from stored field or _source diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index c8c0314805a9d..9cbd9fc5d75fd 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -102,8 +102,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { // TODO as we move analyzers out of the core we need to move some of these into HighlighterWithAnalyzersTests - private static final String[] ALL_TYPES = new String[] {"plain", "postings", "fvh", "unified"}; - private static final String[] UNIFIED_AND_NULL = new String[] {null, "unified"}; + private static final String[] ALL_TYPES = new String[] {"plain", "fvh", "unified"}; @Override protected Collection> nodePlugins() { @@ -127,11 +126,9 @@ public void testHighlightingWithStoredKeyword() throws IOException { .setSource(jsonBuilder().startObject().field("text", "foo").endObject()) .get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "foo")) - .highlighter(new HighlightBuilder().field(new Field("text")).highlighterType(type)).get(); - assertHighlight(search, 0, "text", 0, equalTo("foo")); - } + SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "foo")) + .highlighter(new HighlightBuilder().field(new Field("text"))).get(); + assertHighlight(search, 0, "text", 0, equalTo("foo")); } public void testHighlightingWithWildcardName() throws IOException { @@ -279,19 +276,19 @@ public void testEnsureNoNegativeOffsets() throws Exception { refresh(); SearchResponse search = client().prepareSearch() .setQuery(matchQuery("long_term", "thisisaverylongwordandmakessurethisfails foo highlighed")) - .highlighter(new HighlightBuilder().field("long_term", 18, 1)) + .highlighter(new HighlightBuilder().field("long_term", 18, 1).highlighterType("fvh")) .get(); assertHighlight(search, 0, "long_term", 0, 1, equalTo("thisisaverylongwordandmakessurethisfails")); search = client().prepareSearch() .setQuery(matchPhraseQuery("no_long_term", "test foo highlighed").slop(3)) - .highlighter(new HighlightBuilder().field("no_long_term", 18, 1).postTags("").preTags("")) + .highlighter(new HighlightBuilder().field("no_long_term", 18, 1).highlighterType("fvh").postTags("").preTags("")) .get(); assertNotHighlighted(search, 0, "no_long_term"); search = client().prepareSearch() .setQuery(matchPhraseQuery("no_long_term", "test foo highlighed").slop(3)) - .highlighter(new HighlightBuilder().field("no_long_term", 30, 1).postTags("").preTags("")) + .highlighter(new HighlightBuilder().field("no_long_term", 30, 1).highlighterType("fvh").postTags("").preTags("")) .get(); assertHighlight(search, 0, "no_long_term", 0, 1, equalTo("a test where foo is highlighed and")); @@ -326,26 +323,25 @@ public void testSourceLookupHighlightingUsingPlainHighlighter() throws Exception } indexRandom(true, indexRequestBuilders); - for (String type : UNIFIED_AND_NULL) { - 
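Most of the HighlighterSearchIT churn from here on is mechanical and stems from two related changes: the postings highlighter is gone (ALL_TYPES above shrinks to plain, fvh, unified), and the unified highlighter is now what an unset type selects, which is why the old for (String type : UNIFIED_AND_NULL) loops comparing unified against the default collapse into a single pass. A hedged sketch of the resulting convention, with the field name as a placeholder:

    import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;

    class HighlighterTypeConvention {
        // Leaving the type unset now selects the unified highlighter...
        static HighlightBuilder byDefault() {
            return new HighlightBuilder().field(new HighlightBuilder.Field("title"));
        }

        // ...so spelling out "unified" is equivalent, and "postings" is no longer a valid type.
        static HighlightBuilder explicit() {
            return new HighlightBuilder().field(new HighlightBuilder.Field("title").highlighterType("unified"));
        }
    }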
SearchResponse search = client().prepareSearch() - .setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", -1, 0).highlighterType(type)) - .get(); + SearchResponse search = client().prepareSearch() + .setQuery(matchQuery("title", "bug")) + .highlighter(new HighlightBuilder().field("title", -1, 0)) + .get(); - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting bug present in elasticsearch")); - } + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting bug present in elasticsearch")); + } - search = client().prepareSearch() - .setQuery(matchQuery("attachments.body", "attachment")) - .highlighter(new HighlightBuilder().field("attachments.body", -1, 0).highlighterType(type)) - .get(); + search = client().prepareSearch() + .setQuery(matchQuery("attachments.body", "attachment")) + .highlighter(new HighlightBuilder().field("attachments.body", -1, 0)) + .get(); - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "attachments.body", 0, equalTo("attachment 1")); - assertHighlight(search, i, "attachments.body", 1, equalTo("attachment 2")); - } + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight(search, i, "attachments.body", 0, equalTo("attachment 1")); + assertHighlight(search, i, "attachments.body", 1, equalTo("attachment 2")); } + } public void testSourceLookupHighlightingUsingFastVectorHighlighter() throws Exception { @@ -380,25 +376,23 @@ public void testSourceLookupHighlightingUsingFastVectorHighlighter() throws Exce } indexRandom(true, indexRequestBuilders); - for (String type : UNIFIED_AND_NULL) { - SearchResponse search = client().prepareSearch() - .setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", -1, 0).highlighterType(type)) - .get(); + SearchResponse search = client().prepareSearch() + .setQuery(matchQuery("title", "bug")) + .highlighter(new HighlightBuilder().field("title", -1, 0)) + .get(); - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting bug present in elasticsearch")); - } + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting bug present in elasticsearch")); + } - search = client().prepareSearch() - .setQuery(matchQuery("attachments.body", "attachment")) - .highlighter(new HighlightBuilder().field("attachments.body", -1, 2).highlighterType(type)) - .execute().get(); + search = client().prepareSearch() + .setQuery(matchQuery("attachments.body", "attachment")) + .highlighter(new HighlightBuilder().field("attachments.body", -1, 2)) + .execute().get(); - for (int i = 0; i < 5; i++) { - assertHighlight(search, i, "attachments.body", 0, equalTo("attachment 1")); - assertHighlight(search, i, "attachments.body", 1, equalTo("attachment 2")); - } + for (int i = 0; i < 5; i++) { + assertHighlight(search, i, "attachments.body", 0, equalTo("attachment 1")); + assertHighlight(search, i, "attachments.body", 1, equalTo("attachment 2")); } } @@ -446,28 +440,26 @@ public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Except assertHighlight(search, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on.")); } - for (String type : UNIFIED_AND_NULL) { - search = 
client().prepareSearch() - .setQuery(matchQuery("title", "bug")) - //sentences will be generated out of each value - .highlighter(new HighlightBuilder().field("title").highlighterType(type)).get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "title", 0, - equalTo("This is a test on the highlighting bug present in elasticsearch.")); - assertHighlight(search, i, "title", 1, 2, - equalTo("This is the second bug to perform highlighting on.")); - } + search = client().prepareSearch() + .setQuery(matchQuery("title", "bug")) + //sentences will be generated out of each value + .highlighter(new HighlightBuilder().field("title")).get(); - search = client().prepareSearch() - .setQuery(matchQuery("attachments.body", "attachment")) - .highlighter(new HighlightBuilder().field("attachments.body", -1, 2).highlighterType(type)) - .get(); + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight(search, i, "title", 0, + equalTo("This is a test on the highlighting bug present in elasticsearch.")); + assertHighlight(search, i, "title", 1, 2, + equalTo("This is the second bug to perform highlighting on.")); + } - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "attachments.body", 0, equalTo("attachment for this test")); - assertHighlight(search, i, "attachments.body", 1, 2, equalTo("attachment 2")); - } + search = client().prepareSearch() + .setQuery(matchQuery("attachments.body", "attachment")) + .highlighter(new HighlightBuilder().field("attachments.body", -1, 2)) + .get(); + + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight(search, i, "attachments.body", 0, equalTo("attachment for this test")); + assertHighlight(search, i, "attachments.body", 1, 2, equalTo("attachment 2")); } } @@ -521,9 +513,9 @@ public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() { SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field1", 0, 2, equalTo(" test")); - assertHighlight(searchResponse, 0, "field1", 1, 2, equalTo(" test")); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("this is another test")); + assertHighlight(searchResponse, 0, "field1", 0, 2, equalTo("test")); + assertHighlight(searchResponse, 0, "field1", 1, 2, equalTo("test")); + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("yet another test")); } // Issue #5175 @@ -570,34 +562,31 @@ public void testForceSourceWithSourceDisabled() throws Exception { .setSource("field1", "The quick brown fox jumps over the lazy dog", "field2", "second field content").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - //works using stored field - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(termQuery("field1", "quick")) - .highlighter(new HighlightBuilder().field(new Field("field1").preTags("").postTags("").highlighterType(type))) - .get(); - assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + //works using stored field + SearchResponse searchResponse = client().prepareSearch("test") + .setQuery(termQuery("field1", "quick")) + .highlighter(new HighlightBuilder().field(new Field("field1").preTags("").postTags(""))) + .get(); + assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - assertFailures(client().prepareSearch("test") - .setQuery(termQuery("field1", "quick")) - .highlighter( - new 
HighlightBuilder().field(new Field("field1").preTags("").postTags("") - .highlighterType(type).forceSource(true))), - RestStatus.BAD_REQUEST, - containsString("source is forced for fields [field1] but type [type1] has disabled _source")); + assertFailures(client().prepareSearch("test") + .setQuery(termQuery("field1", "quick")) + .highlighter( + new HighlightBuilder().field(new Field("field1").preTags("").postTags("").forceSource(true))), + RestStatus.BAD_REQUEST, + containsString("source is forced for fields [field1] but type [type1] has disabled _source")); - SearchSourceBuilder searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick")) - .highlighter(highlight().forceSource(true).field("field1").highlighterType(type)); - assertFailures(client().prepareSearch("test").setSource(searchSource), - RestStatus.BAD_REQUEST, - containsString("source is forced for fields [field1] but type [type1] has disabled _source")); + SearchSourceBuilder searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick")) + .highlighter(highlight().forceSource(true).field("field1")); + assertFailures(client().prepareSearch("test").setSource(searchSource), + RestStatus.BAD_REQUEST, + containsString("source is forced for fields [field1] but type [type1] has disabled _source")); - searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick")) - .highlighter(highlight().forceSource(true).field("field*").highlighterType(type)); - assertFailures(client().prepareSearch("test").setSource(searchSource), - RestStatus.BAD_REQUEST, - matches("source is forced for fields \\[field\\d, field\\d\\] but type \\[type1\\] has disabled _source")); - } + searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick")) + .highlighter(highlight().forceSource(true).field("field*")); + assertFailures(client().prepareSearch("test").setSource(searchSource), + RestStatus.BAD_REQUEST, + matches("source is forced for fields \\[field\\d, field\\d\\] but type \\[type1\\] has disabled _source")); } public void testPlainHighlighter() throws Exception { @@ -1015,16 +1004,14 @@ public void testSameContent() throws Exception { } indexRandom(true, indexRequestBuilders); - for (String type : UNIFIED_AND_NULL) { - SearchResponse search = client().prepareSearch() - .setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", -1, 0).highlighterType(type)) - .get(); + SearchResponse search = client().prepareSearch() + .setQuery(matchQuery("title", "bug")) + .highlighter(new HighlightBuilder().field("title", -1, 0)) + .get(); - for (int i = 0; i < 5; i++) { - assertHighlight(search, i, "title", 0, 1, equalTo("This is a test on the highlighting bug " + - "present in elasticsearch")); - } + for (int i = 0; i < 5; i++) { + assertHighlight(search, i, "title", 0, 1, equalTo("This is a test on the highlighting bug " + + "present in elasticsearch")); } } @@ -1041,7 +1028,7 @@ public void testFastVectorHighlighterOffsetParameter() throws Exception { SearchResponse search = client().prepareSearch() .setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", 30, 1, 10)) + .highlighter(new HighlightBuilder().field("title", 30, 1, 10).highlighterType("fvh")) .get(); for (int i = 0; i < 5; i++) { @@ -1061,16 +1048,14 @@ public void testEscapeHtml() throws Exception { } indexRandom(true, indexRequestBuilders); - for (String type : UNIFIED_AND_NULL) { - SearchResponse search = client().prepareSearch() - 
.setQuery(matchQuery("title", "test")) - .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1, 10).highlighterType(type)) - .get(); + SearchResponse search = client().prepareSearch() + .setQuery(matchQuery("title", "test")) + .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1, 10)) + .get(); - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "title", 0, 1, - startsWith("This is a html escaping highlighting test for *&?")); - } + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight(search, i, "title", 0, 1, + startsWith("This is a html escaping highlighting test for *&?")); } } @@ -1087,11 +1072,11 @@ public void testEscapeHtmlVector() throws Exception { SearchResponse search = client().prepareSearch() .setQuery(matchQuery("title", "test")) - .highlighter(new HighlightBuilder().encoder("html").field("title", 30, 1, 10)) + .highlighter(new HighlightBuilder().encoder("html").field("title", 30, 1, 10).highlighterType("plain")) .get(); for (int i = 0; i < 5; i++) { - assertHighlight(search, i, "title", 0, 1, equalTo("highlighting test for *&? elasticsearch")); + assertHighlight(search, i, "title", 0, 1, equalTo(" highlighting test for *&? elasticsearch")); } } @@ -1116,23 +1101,21 @@ public void testMultiMapperVectorWithStore() throws Exception { client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - // simple search on body with standard analyzer with a simple field query - SearchResponse search = client().prepareSearch() - .setQuery(matchQuery("title", "this is a test")) - .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1).highlighterType(type)) - .get(); + // simple search on body with standard analyzer with a simple field query + SearchResponse search = client().prepareSearch() + .setQuery(matchQuery("title", "this is a test")) + .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1)) + .get(); - assertHighlight(search, 0, "title", 0, 1, equalTo("this is a test")); + assertHighlight(search, 0, "title", 0, 1, equalTo("this is a test")); - // search on title.key and highlight on title - search = client().prepareSearch() - .setQuery(matchQuery("title.key", "this is a test")) - .highlighter(new HighlightBuilder().encoder("html").field("title.key", 50, 1).highlighterType(type)) - .get(); + // search on title.key and highlight on title + search = client().prepareSearch() + .setQuery(matchQuery("title.key", "this is a test")) + .highlighter(new HighlightBuilder().encoder("html").field("title.key", 50, 1)) + .get(); - assertHighlight(search, 0, "title.key", 0, 1, equalTo("this is a test")); - } + assertHighlight(search, 0, "title.key", 0, 1, equalTo("this is a test")); } public void testMultiMapperVectorFromSource() throws Exception { @@ -1157,23 +1140,21 @@ public void testMultiMapperVectorFromSource() throws Exception { client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - // simple search on body with standard analyzer with a simple field query - SearchResponse search = client().prepareSearch() - .setQuery(matchQuery("title", "this is a test")) - .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1).highlighterType(type)) - .get(); + // simple search on body with standard analyzer with a simple field query + SearchResponse search = client().prepareSearch() + 
.setQuery(matchQuery("title", "this is a test")) + .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1)) + .get(); - assertHighlight(search, 0, "title", 0, 1, equalTo("this is a test")); + assertHighlight(search, 0, "title", 0, 1, equalTo("this is a test")); - // search on title.key and highlight on title.key - search = client().prepareSearch() - .setQuery(matchQuery("title.key", "this is a test")) - .highlighter(new HighlightBuilder().encoder("html").field("title.key", 50, 1).highlighterType(type)) - .get(); + // search on title.key and highlight on title.key + search = client().prepareSearch() + .setQuery(matchQuery("title.key", "this is a test")) + .highlighter(new HighlightBuilder().encoder("html").field("title.key", 50, 1)) + .get(); - assertHighlight(search, 0, "title.key", 0, 1, equalTo("this is a test")); - } + assertHighlight(search, 0, "title.key", 0, 1, equalTo("this is a test")); } public void testMultiMapperNoVectorWithStore() throws Exception { @@ -1200,23 +1181,21 @@ public void testMultiMapperNoVectorWithStore() throws Exception { refresh(); - for (String type : UNIFIED_AND_NULL) { - // simple search on body with standard analyzer with a simple field query - SearchResponse search = client().prepareSearch() - .setQuery(matchQuery("title", "this is a test")) - .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1).highlighterType(type)) - .get(); + // simple search on body with standard analyzer with a simple field query + SearchResponse search = client().prepareSearch() + .setQuery(matchQuery("title", "this is a test")) + .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1)) + .get(); - assertHighlight(search, 0, "title", 0, 1, equalTo("this is a test")); + assertHighlight(search, 0, "title", 0, 1, equalTo("this is a test")); - // search on title.key and highlight on title - search = client().prepareSearch() - .setQuery(matchQuery("title.key", "this is a test")) - .highlighter(new HighlightBuilder().encoder("html").field("title.key", 50, 1).highlighterType(type)) - .get(); + // search on title.key and highlight on title + search = client().prepareSearch() + .setQuery(matchQuery("title.key", "this is a test")) + .highlighter(new HighlightBuilder().encoder("html").field("title.key", 50, 1)) + .get(); - assertHighlight(search, 0, "title.key", 0, 1, equalTo("this is a test")); - } + assertHighlight(search, 0, "title.key", 0, 1, equalTo("this is a test")); } public void testMultiMapperNoVectorFromSource() throws Exception { @@ -1241,23 +1220,21 @@ public void testMultiMapperNoVectorFromSource() throws Exception { client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - // simple search on body with standard analyzer with a simple field query - SearchResponse search = client().prepareSearch() - .setQuery(matchQuery("title", "this is a test")) - .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1).highlighterType(type)) - .get(); + // simple search on body with standard analyzer with a simple field query + SearchResponse search = client().prepareSearch() + .setQuery(matchQuery("title", "this is a test")) + .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1)) + .get(); - assertHighlight(search, 0, "title", 0, 1, equalTo("this is a test")); + assertHighlight(search, 0, "title", 0, 1, equalTo("this is a test")); - // search on title.key and highlight on title.key - search = client().prepareSearch() 
- .setQuery(matchQuery("title.key", "this is a test")) - .highlighter(new HighlightBuilder().encoder("html").field("title.key", 50, 1).highlighterType(type)) - .get(); + // search on title.key and highlight on title.key + search = client().prepareSearch() + .setQuery(matchQuery("title.key", "this is a test")) + .highlighter(new HighlightBuilder().encoder("html").field("title.key", 50, 1)) + .get(); - assertHighlight(search, 0, "title.key", 0, 1, equalTo("this is a test")); - } + assertHighlight(search, 0, "title.key", 0, 1, equalTo("this is a test")); } public void testFastVectorHighlighterShouldFailIfNoTermVectors() throws Exception { @@ -1305,7 +1282,7 @@ public void testDisableFastVectorHighlighter() throws Exception { SearchResponse search = client().prepareSearch() .setQuery(matchPhraseQuery("title", "test for the workaround")) - .highlighter(new HighlightBuilder().field("title", 50, 1, 10)) + .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("fvh")) .get(); for (int i = 0; i < indexRequestBuilders.length; i++) { @@ -1350,7 +1327,7 @@ public void testFSHHighlightAllMvFragments() throws Exception { SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", "tag")) - .highlighter(new HighlightBuilder().field("tags", -1, 0)).get(); + .highlighter(new HighlightBuilder().field("tags", -1, 0).highlighterType("fvh")).get(); assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); assertHighlight(response, 0, "tags", 1, 2, @@ -1364,16 +1341,14 @@ public void testBoostingQuery() { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource() - .query(boostingQuery(termQuery("field2", "brown"), termQuery("field2", "foobar")).negativeBoost(0.5f)) - .highlighter(highlight().field("field2").order("score").preTags("").postTags("").highlighterType(type)); + logger.info("--> highlighting and searching on field1"); + SearchSourceBuilder source = searchSource() + .query(boostingQuery(termQuery("field2", "brown"), termQuery("field2", "foobar")).negativeBoost(0.5f)) + .highlighter(highlight().field("field2").order("score").preTags("").postTags("")); - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - } + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); } public void testBoostingQueryTermVector() throws IOException { @@ -1404,14 +1379,12 @@ public void testCommonTermsQuery() { refresh(); logger.info("--> highlighting and searching on field1"); - for (String type : UNIFIED_AND_NULL) { - SearchSourceBuilder source = searchSource() - .query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) - .highlighter(highlight().field("field2").order("score").preTags("").postTags("").highlighterType(type)); + SearchSourceBuilder source = searchSource() + .query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) + .highlighter(highlight().field("field2").order("score").preTags("").postTags("")); - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - 
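Note the counterpart pattern in the fvh hunks above: tests that previously relied on the right highlighter being auto-selected for term-vector fields now pin highlighterType("fvh") explicitly, presumably because the unified default no longer switches implementations based on index options, and fragment offsets (the fourth argument in field(name, fragmentSize, numberOfFragments, fragmentOffset)) are a fast-vector-highlighter feature. A sketch of such a pinned call, with placeholder names:

    import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;

    class PinnedFvhExample {
        // Equivalent to field("title", 30, 1, 10).highlighterType("fvh") in the hunks above,
        // written out with the explicit Field builder.
        static HighlightBuilder fvhWithOffset() {
            return new HighlightBuilder().field(new HighlightBuilder.Field("title")
                .fragmentSize(30).numOfFragments(1).fragmentOffset(10)
                .highlighterType("fvh"));
        }
    }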
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - } + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); } public void testCommonTermsTermVector() throws IOException { @@ -1453,84 +1426,86 @@ public void testPhrasePrefix() throws IOException { refresh(); logger.info("--> highlighting and searching on field0"); - for (String type : UNIFIED_AND_NULL) { - SearchSourceBuilder source = searchSource() + SearchSourceBuilder source = searchSource() .query(matchPhrasePrefixQuery("field0", "bro")) - .highlighter(highlight().field("field0").order("score").preTags("").postTags("").highlighterType(type)); + .highlighter(highlight().field("field0").order("score").preTags("").postTags("")); + SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + source = searchSource() + .query(matchPhrasePrefixQuery("field0", "quick bro")) + .highlighter(highlight().field("field0").order("score").preTags("").postTags("")); + searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - source = searchSource() - .query(matchPhrasePrefixQuery("field0", "quick bro")) - .highlighter(highlight().field("field0").order("score").preTags("").postTags("").highlighterType(type)); + logger.info("--> highlighting and searching on field1"); + source = searchSource() + .query(boolQuery() + .should(matchPhrasePrefixQuery("field1", "test")) + .should(matchPhrasePrefixQuery("field1", "bro")) + ) + .highlighter(highlight().field("field1").order("score").preTags("").postTags("")); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + assertThat(searchResponse.getHits().totalHits, equalTo(2L)); + for (int i = 0; i < 2; i++) { + assertHighlight(searchResponse, i, "field1", 0, 1, anyOf( + equalTo("The quick browse button is a fancy thing, right bro?"), + equalTo("The quick brown fox jumps over the lazy dog"))); + } - assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + source = searchSource() + .query(matchPhrasePrefixQuery("field1", "quick bro")) + .highlighter(highlight().field("field1").order("score").preTags("").postTags("")); + + searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + + assertHighlight(searchResponse, 0, "field1", 0, 1, anyOf( + equalTo("The quick browse button is a fancy thing, right bro?"), + equalTo("The quick brown fox jumps over the lazy dog"))); + assertHighlight(searchResponse, 1, "field1", 0, 1, anyOf( + equalTo("The quick browse button is a fancy thing, right bro?"), + equalTo("The quick brown fox jumps over the lazy dog"))); + + // with synonyms + client().prepareIndex("test", "type2", "0").setSource( + "field4", "The quick brown fox jumps over the lazy dog", + "field3", "The quick brown fox jumps over the lazy 
dog").get(); + client().prepareIndex("test", "type2", "1").setSource( + "field4", "The quick browse button is a fancy thing, right bro?").get(); + client().prepareIndex("test", "type2", "2").setSource( + "field4", "a quick fast blue car").get(); + refresh(); - logger.info("--> highlighting and searching on field1"); - source = searchSource() - .query(matchPhrasePrefixQuery("field1", "quick bro")) - .highlighter(highlight().field("field1").order("score").preTags("").postTags("").highlighterType(type)); + source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field3", "fast bro")) + .highlighter(highlight().field("field3").order("score").preTags("").postTags("")); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field1", 0, 1, anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog"), - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog"))); - assertHighlight(searchResponse, 1, "field1", 0, 1, anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog"), - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog"))); - - // with synonyms - client().prepareIndex("test", "type2", "0").setSource( - "field4", "The quick brown fox jumps over the lazy dog", - "field3", "The quick brown fox jumps over the lazy dog").get(); - client().prepareIndex("test", "type2", "1").setSource( - "field4", "The quick browse button is a fancy thing, right bro?").get(); - client().prepareIndex("test", "type2", "2").setSource( - "field4", "a quick fast blue car").get(); - refresh(); - - source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field3", "fast bro")) - .highlighter(highlight().field("field3").order("score").preTags("").postTags("").highlighterType(type)); + assertHighlight(searchResponse, 0, "field3", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + logger.info("--> highlighting and searching on field4"); + source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field4", "the fast bro")) + .highlighter(highlight().field("field4").order("score").preTags("").postTags("")); + searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field3", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + assertHighlight(searchResponse, 0, "field4", 0, 1, anyOf( + equalTo("The quick browse button is a fancy thing, right bro?"), + equalTo("The quick brown fox jumps over the lazy dog"))); + assertHighlight(searchResponse, 1, "field4", 0, 1, anyOf( + equalTo("The quick browse button is a fancy thing, right bro?"), + equalTo("The quick brown fox jumps over the lazy dog"))); - logger.info("--> highlighting and searching on field4"); - source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field4", "the fast bro")) - .highlighter(highlight().field("field4").order("score").preTags("").postTags("").highlighterType(type)); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + 
logger.info("--> highlighting and searching on field4"); + source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field4", "a fast quick blue ca")) + .highlighter(highlight().field("field4").order("score").preTags("").postTags("")); + searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field4", 0, 1, anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog"), - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog"))); - assertHighlight(searchResponse, 1, "field4", 0, 1, anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog"), - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog"))); - - logger.info("--> highlighting and searching on field4"); - source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field4", "a fast quick blue ca")) - .highlighter(highlight().field("field4").order("score").preTags("").postTags("").highlighterType(type)); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - - assertHighlight(searchResponse, 0, "field4", 0, 1, - anyOf(equalTo("a quick fast blue car"), - equalTo("a quick fast blue car"))); - } + assertHighlight(searchResponse, 0, "field4", 0, 1, + anyOf(equalTo("a quick fast blue car"), + equalTo("a quick fast blue car"))); } public void testPlainHighlightDifferentFragmenter() throws Exception { @@ -1546,8 +1521,9 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQuery.Type.PHRASE)) .highlighter( - new HighlightBuilder().field(new HighlightBuilder.Field("tags").fragmentSize(-1).numOfFragments(2) - .fragmenter("simple"))).get(); + new HighlightBuilder().field(new HighlightBuilder.Field("tags") + .highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("simple"))) + .get(); assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); assertHighlight(response, 0, "tags", 1, 2, @@ -1556,7 +1532,7 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQuery.Type.PHRASE)) .highlighter( - new HighlightBuilder().field(new HighlightBuilder.Field("tags").fragmentSize(-1).numOfFragments(2) + new HighlightBuilder().field(new Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2) .fragmenter("span"))).get(); assertHighlight(response, 0, "tags", 0, @@ -1567,7 +1543,7 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { assertFailures(client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQuery.Type.PHRASE)) .highlighter( - new HighlightBuilder().field(new HighlightBuilder.Field("tags").fragmentSize(-1).numOfFragments(2) + new HighlightBuilder().field(new Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2) .fragmenter("invalid"))), RestStatus.BAD_REQUEST, containsString("unknown fragmenter option [invalid] for the field [tags]")); @@ -1621,15 +1597,13 @@ public void testMissingStoredField() throws Exception { 
.endObject()).get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - // This query used to fail when the field to highlight was absent - SearchResponse response = client().prepareSearch("test") - .setQuery(QueryBuilders.matchQuery("field", "highlight").type(MatchQuery.Type.BOOLEAN)) - .highlighter( - new HighlightBuilder().field(new HighlightBuilder.Field("highlight_field").fragmentSize(-1).numOfFragments(1) - .fragmenter("simple")).highlighterType(type)).get(); - assertThat(response.getHits().getHits()[0].getHighlightFields().isEmpty(), equalTo(true)); - } + // This query used to fail when the field to highlight was absent + SearchResponse response = client().prepareSearch("test") + .setQuery(QueryBuilders.matchQuery("field", "highlight").type(MatchQuery.Type.BOOLEAN)) + .highlighter( + new HighlightBuilder().field(new HighlightBuilder.Field("highlight_field").fragmentSize(-1).numOfFragments(1) + .fragmenter("simple"))).get(); + assertThat(response.getHits().getHits()[0].getHighlightFields().isEmpty(), equalTo(true)); } // Issue #3211 @@ -1674,13 +1648,11 @@ public void testResetTwice() throws Exception { .setSource("text", "elasticsearch test").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - SearchResponse response = client().prepareSearch("test") - .setQuery(QueryBuilders.matchQuery("text", "test").type(MatchQuery.Type.BOOLEAN)) - .highlighter(new HighlightBuilder().field("text").highlighterType(type)).execute().actionGet(); - // PatternAnalyzer will throw an exception if it is resetted twice - assertHitCount(response, 1L); - } + SearchResponse response = client().prepareSearch("test") + .setQuery(QueryBuilders.matchQuery("text", "test").type(MatchQuery.Type.BOOLEAN)) + .highlighter(new HighlightBuilder().field("text")).execute().actionGet(); + // PatternAnalyzer will throw an exception if it is reset twice + assertHitCount(response, 1L); } public void testHighlightUsesHighlightQuery() throws IOException { @@ -1745,10 +1717,6 @@ public void testHighlightNoMatchSize() throws IOException { response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); assertNotHighlighted(response, 0, "text"); - field.highlighterType("postings"); - response = 
client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off.")); - // We can also ask for a fragment longer than the input string and get the whole string field.highlighterType("plain").noMatchSize(text.length() * 2); response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); @@ -1803,11 +1762,6 @@ public void testHighlightNoMatchSize() throws IOException { response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); assertHighlight(response, 0, "text", 0, 1, equalTo(text)); - //no difference using postings hl as the noMatchSize is ignored (just needs to be greater than 0) - field.highlighterType("postings"); - response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off.")); - // We can also ask for a fragment exactly the size of the input field and get the whole field field.highlighterType("plain").noMatchSize(text.length()); response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); @@ -1822,11 +1776,6 @@ public void testHighlightNoMatchSize() throws IOException { response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); assertHighlight(response, 0, "text", 0, 1, equalTo(text)); - //no difference using postings hl as the noMatchSize is ignored (just needs to be greater than 0) - field.highlighterType("postings"); - response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off.")); - // You can set noMatchSize globally in the highlighter as well field.highlighterType("plain").noMatchSize(null); response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field).noMatchSize(21)).get(); @@ -1840,10 +1789,6 @@ public void testHighlightNoMatchSize() throws IOException { response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field).noMatchSize(21)).get(); assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some")); - field.highlighterType("postings"); - response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field).noMatchSize(21)).get(); - assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off.")); - // We don't break if noMatchSize is less than zero though field.highlighterType("plain").noMatchSize(randomIntBetween(Integer.MIN_VALUE, -1)); response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); @@ -1853,10 +1798,6 @@ public void testHighlightNoMatchSize() throws IOException { response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); assertNotHighlighted(response, 0, "text"); - field.highlighterType("postings"); - response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); - assertNotHighlighted(response, 0, "text"); - field.highlighterType("unified"); response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); assertNotHighlighted(response, 0, "text"); @@ -1889,11 +1830,6 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() 
throws IOException { response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some")); - // Postings hl also works but the fragment is the whole first sentence (size ignored) - field.highlighterType("postings"); - response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off.")); - // And noMatchSize returns nothing when the first entry is empty string! index("test", "type1", "2", "text", new String[] {"", text2}); refresh(); @@ -1911,12 +1847,6 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { .highlighter(new HighlightBuilder().field(field)).get(); assertNotHighlighted(response, 0, "text"); - field.highlighterType("postings"); - response = client().prepareSearch("test") - .setQuery(idsQueryBuilder) - .highlighter(new HighlightBuilder().field(field)).get(); - assertNotHighlighted(response, 0, "text"); - // except for the unified highlighter which starts from the first string with actual content field.highlighterType("unified"); response = client().prepareSearch("test") @@ -1940,12 +1870,6 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { .highlighter(new HighlightBuilder().field(field)).get(); assertNotHighlighted(response, 0, "text"); - field.highlighterType("postings"); - response = client().prepareSearch("test") - .setQuery(idsQueryBuilder) - .highlighter(new HighlightBuilder().field(field)).get(); - assertNotHighlighted(response, 0, "text"); - field.highlighterType("unified"); response = client().prepareSearch("test") .setQuery(idsQueryBuilder) @@ -1986,10 +1910,6 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); assertNotHighlighted(response, 0, "text"); - field.highlighterType("postings"); - response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); - assertNotHighlighted(response, 0, "text"); - field.highlighterType("unified"); response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); assertNotHighlighted(response, 0, "text"); @@ -2024,11 +1944,6 @@ public void testHighlightNoMatchSizeNumberOfFragments() throws IOException { assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first sentence")); - // Postings hl also works but the fragment is the whole first sentence (size ignored) - field.highlighterType("postings"); - response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first sentence.")); - //if there's a match we only return the values with matches (whole value as number_of_fragments == 0) MatchQueryBuilder queryBuilder = QueryBuilders.matchQuery("text", "third fifth"); field.highlighterType("plain"); @@ -2041,11 +1956,6 @@ public void testHighlightNoMatchSizeNumberOfFragments() throws IOException { assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. 
This is the fourth sentence.")); assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); - field.highlighterType("postings"); - response = client().prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); - assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); - field.highlighterType("unified"); response = client().prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)).get(); assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); @@ -2060,49 +1970,42 @@ public void testPostingsHighlighter() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy quick dog").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource() - .query(termQuery("field1", "test")) - .highlighter(highlight().field("field1").preTags("").postTags("").highlighterType(type)); - SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + logger.info("--> highlighting and searching on field1"); + SearchSourceBuilder source = searchSource() + .query(termQuery("field1", "test")) + .highlighter(highlight().field("field1").preTags("").postTags("")); + SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a test")); + assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a test")); - logger.info("--> searching on field1, highlighting on field1"); - source = searchSource() - .query(termQuery("field1", "test")) - .highlighter(highlight().field("field1").preTags("").postTags("").highlighterType(type)); + logger.info("--> searching on field1, highlighting on field1"); + source = searchSource() + .query(termQuery("field1", "test")) + .highlighter(highlight().field("field1").preTags("").postTags("")); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a test")); + assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a test")); - logger.info("--> searching on field2, highlighting on field2"); - source = searchSource() - .query(termQuery("field2", "quick")) - .highlighter(highlight().field("field2").order("score").preTags("").postTags("").highlighterType(type)); + logger.info("--> searching on field2, highlighting on field2"); + source = searchSource() + .query(termQuery("field2", "quick")) + .highlighter(highlight().field("field2").order("score").preTags("").postTags("")); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field2", 0, 1, - equalTo("The quick brown fox jumps over the lazy quick dog")); + assertHighlight(searchResponse, 0, "field2", 0, 1, + equalTo("The quick brown fox jumps over the lazy quick dog")); - logger.info("--> searching on field2, highlighting on field2"); - source = searchSource() - .query(matchPhraseQuery("field2", "quick 
brown")) - .highlighter(highlight().field("field2").preTags("").postTags("").highlighterType(type)); + logger.info("--> searching on field2, highlighting on field2"); + source = searchSource() + .query(matchPhraseQuery("field2", "quick brown")) + .highlighter(highlight().field("field2").preTags("").postTags("")); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - if (type == null) { - //phrase query results in highlighting all different terms regardless of their positions - assertHighlight(searchResponse, 0, "field2", 0, 1, - equalTo("The quick brown fox jumps over the lazy quick dog")); - } else { - assertHighlight(searchResponse, 0, "field2", 0, 1, - equalTo("The quick brown fox jumps over the lazy quick dog")); - } + assertHighlight(searchResponse, 0, "field2", 0, 1, + equalTo("The quick brown fox jumps over the lazy quick dog")); //lets fall back to the standard highlighter then, what people would do to highlight query matches logger.info("--> searching on field2, highlighting on field2, falling back to the plain highlighter"); @@ -2111,11 +2014,10 @@ public void testPostingsHighlighter() throws Exception { .highlighter(highlight() .field("field2").preTags("").postTags("").highlighterType("plain").requireFieldMatch(false)); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field2", 0, 1, - equalTo("The quick brown fox jumps over the lazy quick dog")); - } + assertHighlight(searchResponse, 0, "field2", 0, 1, + equalTo("The quick brown fox jumps over the lazy quick dog")); } public void testPostingsHighlighterMultipleFields() throws Exception { @@ -2127,15 +2029,13 @@ public void testPostingsHighlighterMultipleFields() throws Exception { "field2", "The slow brown fox. 
Second sentence."); refresh(); - for (String type : UNIFIED_AND_NULL) { - SearchResponse response = client().prepareSearch("test") - .setQuery(QueryBuilders.matchQuery("field1", "fox")) - .highlighter( - new HighlightBuilder().field(new Field("field1").preTags("<1>").postTags("") - .requireFieldMatch(true).highlighterType(type))) - .get(); - assertHighlight(response, 0, "field1", 0, 1, equalTo("The quick brown <1>fox.")); - } + SearchResponse response = client().prepareSearch("test") + .setQuery(QueryBuilders.matchQuery("field1", "fox")) + .highlighter( + new HighlightBuilder().field(new Field("field1").preTags("<1>").postTags("") + .requireFieldMatch(true))) + .get(); + assertHighlight(response, 0, "field1", 0, 1, equalTo("The quick brown <1>fox.")); } public void testPostingsHighlighterNumberOfFragments() throws Exception { @@ -2150,53 +2050,50 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource() - .query(termQuery("field1", "fox")) - .highlighter(highlight() - .field(new Field("field1").numOfFragments(5).preTags("").postTags("").highlighterType(type))); - - SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + logger.info("--> highlighting and searching on field1"); + SearchSourceBuilder source = searchSource() + .query(termQuery("field1", "fox")) + .highlighter(highlight() + .field(new Field("field1").numOfFragments(5).preTags("").postTags(""))); - assertHighlight(searchResponse, 0, "field1", 0, equalTo("The quick brown fox jumps over the lazy dog.")); - assertHighlight(searchResponse, 0, "field1", 1, equalTo("The lazy red fox jumps over the quick dog.")); - assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy fox.")); + SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - client().prepareIndex("test", "type1", "2") - .setSource("field1", new String[]{ - "The quick brown fox jumps over the lazy dog. Second sentence not finished", - "The lazy red fox jumps over the quick dog.", - "The quick brown dog jumps over the lazy fox."}).get(); - refresh(); + assertHighlight(searchResponse, 0, "field1", 0, equalTo("The quick brown fox jumps over the lazy dog.")); + assertHighlight(searchResponse, 0, "field1", 1, equalTo("The lazy red fox jumps over the quick dog.")); + assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy fox.")); - source = searchSource() - .query(termQuery("field1", "fox")) - .highlighter(highlight() - .field(new Field("field1").numOfFragments(0).preTags("").postTags("").highlighterType(type))); + client().prepareIndex("test", "type1", "2") + .setSource("field1", new String[]{ + "The quick brown fox jumps over the lazy dog. Second sentence not finished", + "The lazy red fox jumps over the quick dog.", + "The quick brown dog jumps over the lazy fox."}).get(); + refresh(); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHitCount(searchResponse, 2L); - - for (SearchHit searchHit : searchResponse.getHits()) { - if ("1".equals(searchHit.getId())) { - assertHighlight(searchHit, "field1", 0, 1, equalTo("The quick brown fox jumps over the lazy dog. " - + "The lazy red fox jumps over the quick dog. 
" - + "The quick brown dog jumps over the lazy fox.")); - } else if ("2".equals(searchHit.getId())) { - assertHighlight(searchHit, "field1", 0, 3, - equalTo("The quick brown fox jumps over the lazy dog. Second sentence not finished")); - assertHighlight(searchHit, "field1", 1, 3, equalTo("The lazy red fox jumps over the quick dog.")); - assertHighlight(searchHit, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy fox.")); - } else { - fail("Only hits with id 1 and 2 are returned"); - } + source = searchSource() + .query(termQuery("field1", "fox")) + .highlighter(highlight() + .field(new Field("field1").numOfFragments(0).preTags("").postTags(""))); + + searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + assertHitCount(searchResponse, 2L); + + for (SearchHit searchHit : searchResponse.getHits()) { + if ("1".equals(searchHit.getId())) { + assertHighlight(searchHit, "field1", 0, 1, equalTo("The quick brown fox jumps over the lazy dog. " + + "The lazy red fox jumps over the quick dog. " + + "The quick brown dog jumps over the lazy fox.")); + } else if ("2".equals(searchHit.getId())) { + assertHighlight(searchHit, "field1", 0, 3, + equalTo("The quick brown fox jumps over the lazy dog. Second sentence not finished")); + assertHighlight(searchHit, "field1", 1, 3, equalTo("The lazy red fox jumps over the quick dog.")); + assertHighlight(searchHit, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy fox.")); + } else { + fail("Only hits with id 1 and 2 are returned"); } } } public void testMultiMatchQueryHighlight() throws IOException { - String[] highlighterTypes = new String[] {"fvh", "plain", "postings", "unified"}; XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties") .startObject("field1") @@ -2219,23 +2116,10 @@ public void testMultiMatchQueryHighlight() throws IOException { refresh(); final int iters = scaledRandomIntBetween(20, 30); for (int i = 0; i < iters; i++) { - String highlighterType = rarely() ? null : RandomPicks.randomFrom(random(), highlighterTypes); - MultiMatchQueryBuilder.Type[] supportedQueryTypes; - if ("postings".equals(highlighterType)) { - /* - * phrase_prefix is not supported by postings highlighter, as it rewrites against an empty reader, the prefix will never - * match any term - */ - supportedQueryTypes = new MultiMatchQueryBuilder.Type[]{ - MultiMatchQueryBuilder.Type.BEST_FIELDS, - MultiMatchQueryBuilder.Type.CROSS_FIELDS, - MultiMatchQueryBuilder.Type.MOST_FIELDS, - MultiMatchQueryBuilder.Type.PHRASE}; - } else { - supportedQueryTypes = MultiMatchQueryBuilder.Type.values(); - } - MultiMatchQueryBuilder.Type matchQueryType = RandomPicks.randomFrom(random(), supportedQueryTypes); - MultiMatchQueryBuilder multiMatchQueryBuilder = multiMatchQuery("the quick brown fox", "field1", "field2").type(matchQueryType); + String highlighterType = rarely() ? 
null : RandomPicks.randomFrom(random(), ALL_TYPES); + MultiMatchQueryBuilder.Type matchQueryType = RandomPicks.randomFrom(random(), MultiMatchQueryBuilder.Type.values()); + MultiMatchQueryBuilder multiMatchQueryBuilder = multiMatchQuery("the quick brown fox", "field1", "field2") + .type(matchQueryType); SearchSourceBuilder source = searchSource() .query(multiMatchQueryBuilder) @@ -2264,26 +2148,24 @@ public void testPostingsHighlighterOrderByScore() throws Exception { + "This one contains no matches."}).get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource() - .query(termQuery("field1", "sentence")) - .highlighter(highlight().field("field1").order("score").highlighterType(type)); + logger.info("--> highlighting and searching on field1"); + SearchSourceBuilder source = searchSource() + .query(termQuery("field1", "sentence")) + .highlighter(highlight().field("field1").order("score")); - SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - Map highlightFieldMap = searchResponse.getHits().getAt(0).getHighlightFields(); - assertThat(highlightFieldMap.size(), equalTo(1)); - HighlightField field1 = highlightFieldMap.get("field1"); - assertThat(field1.fragments().length, equalTo(5)); - assertThat(field1.fragments()[0].string(), - equalTo("This sentence contains three sentence occurrences (sentence).")); - assertThat(field1.fragments()[1].string(), equalTo("This sentence contains two sentence matches.")); - assertThat(field1.fragments()[2].string(), equalTo("This is the second value's first sentence.")); - assertThat(field1.fragments()[3].string(), equalTo("This sentence contains one match, not that short.")); - assertThat(field1.fragments()[4].string(), - equalTo("One sentence match here and scored lower since the text is quite long, not that appealing.")); - } + Map highlightFieldMap = searchResponse.getHits().getAt(0).getHighlightFields(); + assertThat(highlightFieldMap.size(), equalTo(1)); + HighlightField field1 = highlightFieldMap.get("field1"); + assertThat(field1.fragments().length, equalTo(5)); + assertThat(field1.fragments()[0].string(), + equalTo("This sentence contains three sentence occurrences (sentence).")); + assertThat(field1.fragments()[1].string(), equalTo("This sentence contains two sentence matches.")); + assertThat(field1.fragments()[2].string(), equalTo("This is the second value's first sentence.")); + assertThat(field1.fragments()[3].string(), equalTo("This sentence contains one match, not that short.")); + assertThat(field1.fragments()[4].string(), + equalTo("One sentence match here and scored lower since the text is quite long, not that appealing.")); } public void testPostingsHighlighterEscapeHtml() throws Exception { @@ -2297,15 +2179,13 @@ public void testPostingsHighlighterEscapeHtml() throws Exception { } indexRandom(true, indexRequestBuilders); - for (String type : UNIFIED_AND_NULL) { - SearchResponse searchResponse = client().prepareSearch() - .setQuery(matchQuery("title", "test")) - .highlighter(new HighlightBuilder().field("title").encoder("html").highlighterType(type)).get(); + SearchResponse searchResponse = client().prepareSearch() + .setQuery(matchQuery("title", "test")) + .highlighter(new HighlightBuilder().field("title").encoder("html")).get(); - for (int i = 0; i < indexRequestBuilders.length; i++) { - 
assertHighlight(searchResponse, i, "title", 0, 1, - equalTo("This is a html escaping highlighting test for *&?")); - } + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight(searchResponse, i, "title", 0, 1, + equalTo("This is a html escaping highlighting test for *&?")); } } @@ -2330,28 +2210,26 @@ public void testPostingsHighlighterMultiMapperWithStore() throws Exception { client().prepareIndex("test", "type1", "1").setSource("title", "this is a test . Second sentence.").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - // simple search on body with standard analyzer with a simple field query - SearchResponse searchResponse = client().prepareSearch() - //lets make sure we analyze the query and we highlight the resulting terms - .setQuery(matchQuery("title", "This is a Test")) - .highlighter(new HighlightBuilder().field("title").highlighterType(type)).get(); + // simple search on body with standard analyzer with a simple field query + SearchResponse searchResponse = client().prepareSearch() + //lets make sure we analyze the query and we highlight the resulting terms + .setQuery(matchQuery("title", "This is a Test")) + .highlighter(new HighlightBuilder().field("title")).get(); - assertHitCount(searchResponse, 1L); - SearchHit hit = searchResponse.getHits().getAt(0); - //stopwords are not highlighted since not indexed - assertHighlight(hit, "title", 0, 1, equalTo("this is a test .")); - - // search on title.key and highlight on title - searchResponse = client().prepareSearch() - .setQuery(matchQuery("title.key", "this is a test")) - .highlighter(new HighlightBuilder().field("title.key")).get(); - assertHitCount(searchResponse, 1L); + assertHitCount(searchResponse, 1L); + SearchHit hit = searchResponse.getHits().getAt(0); + //stopwords are not highlighted since not indexed + assertHighlight(hit, "title", 0, 1, equalTo("this is a test .")); - //stopwords are now highlighted since we used only whitespace analyzer here - assertHighlight(searchResponse, 0, "title.key", 0, 1, - equalTo("this is a test .")); - } + // search on title.key and highlight on title + searchResponse = client().prepareSearch() + .setQuery(matchQuery("title.key", "this is a test")) + .highlighter(new HighlightBuilder().field("title.key")).get(); + assertHitCount(searchResponse, 1L); + + //stopwords are now highlighted since we used only whitespace analyzer here + assertHighlight(searchResponse, 0, "title.key", 0, 1, + equalTo("this is a test .")); } public void testPostingsHighlighterMultiMapperFromSource() throws Exception { @@ -2376,22 +2254,20 @@ public void testPostingsHighlighterMultiMapperFromSource() throws Exception { client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - // simple search on body with standard analyzer with a simple field query - SearchResponse searchResponse = client().prepareSearch() - .setQuery(matchQuery("title", "this is a test")) - .highlighter(new HighlightBuilder().field("title")) - .get(); + // simple search on body with standard analyzer with a simple field query + SearchResponse searchResponse = client().prepareSearch() + .setQuery(matchQuery("title", "this is a test")) + .highlighter(new HighlightBuilder().field("title")) + .get(); - assertHighlight(searchResponse, 0, "title", 0, 1, equalTo("this is a test")); + assertHighlight(searchResponse, 0, "title", 0, 1, equalTo("this is a test")); - // search on title.key and highlight on title.key - searchResponse = 
client().prepareSearch() - .setQuery(matchQuery("title.key", "this is a test")) - .highlighter(new HighlightBuilder().field("title.key").highlighterType(type)).get(); + // search on title.key and highlight on title.key + searchResponse = client().prepareSearch() + .setQuery(matchQuery("title.key", "this is a test")) + .highlighter(new HighlightBuilder().field("title.key")).get(); - assertHighlight(searchResponse, 0, "title.key", 0, 1, equalTo("this is a test")); - } + assertHighlight(searchResponse, 0, "title.key", 0, 1, equalTo("this is a test")); } public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception { @@ -2413,26 +2289,6 @@ public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception { .highlighter(new HighlightBuilder().field("title")) .get(); assertNoFailures(search); - - assertFailures(client().prepareSearch() - .setQuery(matchQuery("title", "this is a test")) - .highlighter(new HighlightBuilder().field("title").highlighterType("postings")), - RestStatus.BAD_REQUEST, - containsString("the field [title] should be indexed with positions and offsets in the " - + "postings list to be used with postings highlighter")); - - - assertFailures(client().prepareSearch() - .setQuery(matchQuery("title", "this is a test")) - .highlighter(new HighlightBuilder().field("title").highlighterType("postings")), - RestStatus.BAD_REQUEST, - containsString("the field [title] should be indexed with positions and offsets in the " - + "postings list to be used with postings highlighter")); - - //should not fail if there is a wildcard - assertNoFailures(client().prepareSearch() - .setQuery(matchQuery("title", "this is a test")) - .highlighter(new HighlightBuilder().field("tit*").highlighterType("postings")).get()); } public void testPostingsHighlighterBoostingQuery() throws IOException { @@ -2442,15 +2298,13 @@ public void testPostingsHighlighterBoostingQuery() throws IOException { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource() - .query(boostingQuery(termQuery("field2", "brown"), termQuery("field2", "foobar")).negativeBoost(0.5f)) - .highlighter(highlight().field("field2").preTags("").postTags("").highlighterType(type)); - SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + logger.info("--> highlighting and searching on field1"); + SearchSourceBuilder source = searchSource() + .query(boostingQuery(termQuery("field2", "brown"), termQuery("field2", "foobar")).negativeBoost(0.5f)) + .highlighter(highlight().field("field2").preTags("").postTags("")); + SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); - } + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); } public void testPostingsHighlighterCommonTermsQuery() throws IOException { @@ -2461,15 +2315,13 @@ public void testPostingsHighlighterCommonTermsQuery() throws IOException { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! 
Second sentence.").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) - .highlighter(highlight().field("field2").preTags("").postTags("").highlighterType(type)); - SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHitCount(searchResponse, 1L); + logger.info("--> highlighting and searching on field1"); + SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) + .highlighter(highlight().field("field2").preTags("").postTags("")); + SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + assertHitCount(searchResponse, 1L); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); - } + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); } private static XContentBuilder type1PostingsffsetsMapping() throws IOException { @@ -2490,12 +2342,10 @@ public void testPostingsHighlighterPrefixQuery() throws Exception { refresh(); logger.info("--> highlighting and searching on field2"); - for (String type : UNIFIED_AND_NULL) { - SearchSourceBuilder source = searchSource().query(prefixQuery("field2", "qui")) - .highlighter(highlight().field("field2").highlighterType(type)); - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); - } + SearchSourceBuilder source = searchSource().query(prefixQuery("field2", "qui")) + .highlighter(highlight().field("field2")); + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); } public void testPostingsHighlighterFuzzyQuery() throws Exception { @@ -2506,14 +2356,12 @@ public void testPostingsHighlighterFuzzyQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field2"); - SearchSourceBuilder source = searchSource().query(fuzzyQuery("field2", "quck")) - .highlighter(highlight().field("field2").highlighterType(type)); - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + logger.info("--> highlighting and searching on field2"); + SearchSourceBuilder source = searchSource().query(fuzzyQuery("field2", "quck")) + .highlighter(highlight().field("field2")); + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); - } + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); } public void testPostingsHighlighterRegexpQuery() throws Exception { @@ -2524,14 +2372,12 @@ public void testPostingsHighlighterRegexpQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! 
Second sentence.").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field2"); - SearchSourceBuilder source = searchSource().query(regexpQuery("field2", "qu[a-l]+k")) - .highlighter(highlight().field("field2").highlighterType(type)); - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + logger.info("--> highlighting and searching on field2"); + SearchSourceBuilder source = searchSource().query(regexpQuery("field2", "qu[a-l]+k")) + .highlighter(highlight().field("field2")); + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); - } + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); } public void testPostingsHighlighterWildcardQuery() throws Exception { @@ -2542,21 +2388,19 @@ public void testPostingsHighlighterWildcardQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field2"); - SearchSourceBuilder source = searchSource().query(wildcardQuery("field2", "qui*")) - .highlighter(highlight().field("field2").highlighterType(type)); - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + logger.info("--> highlighting and searching on field2"); + SearchSourceBuilder source = searchSource().query(wildcardQuery("field2", "qui*")) + .highlighter(highlight().field("field2")); + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); - source = searchSource().query(wildcardQuery("field2", "qu*k")) - .highlighter(highlight().field("field2").highlighterType(type)); - searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHitCount(searchResponse, 1L); + source = searchSource().query(wildcardQuery("field2", "qu*k")) + .highlighter(highlight().field("field2")); + searchResponse = client().prepareSearch("test").setSource(source).get(); + assertHitCount(searchResponse, 1L); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); - } + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); } public void testPostingsHighlighterTermRangeQuery() throws Exception { @@ -2566,14 +2410,12 @@ public void testPostingsHighlighterTermRangeQuery() throws Exception { client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "aaab").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field2"); - SearchSourceBuilder source = searchSource().query(rangeQuery("field2").gte("aaaa").lt("zzzz")) - .highlighter(highlight().field("field2").highlighterType(type)); - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + logger.info("--> highlighting and searching on field2"); + SearchSourceBuilder source = searchSource().query(rangeQuery("field2").gte("aaaa").lt("zzzz")) + 
.highlighter(highlight().field("field2")); + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("aaab")); - } + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("aaab")); } public void testPostingsHighlighterQueryString() throws Exception { @@ -2584,13 +2426,11 @@ public void testPostingsHighlighterQueryString() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field2"); - SearchSourceBuilder source = searchSource().query(queryStringQuery("qui*").defaultField("field2")) - .highlighter(highlight().field("field2").highlighterType(type)); - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); - } + logger.info("--> highlighting and searching on field2"); + SearchSourceBuilder source = searchSource().query(queryStringQuery("qui*").defaultField("field2")) + .highlighter(highlight().field("field2")); + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog!")); } public void testPostingsHighlighterRegexpQueryWithinConstantScoreQuery() throws Exception { @@ -2600,13 +2440,11 @@ public void testPostingsHighlighterRegexpQueryWithinConstantScoreQuery() throws client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource().query(constantScoreQuery(regexpQuery("field1", "pho[a-z]+"))) - .highlighter(highlight().field("field1").highlighterType(type)); - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The photography word will get highlighted")); - } + logger.info("--> highlighting and searching on field1"); + SearchSourceBuilder source = searchSource().query(constantScoreQuery(regexpQuery("field1", "pho[a-z]+"))) + .highlighter(highlight().field("field1")); + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The photography word will get highlighted")); } public void testPostingsHighlighterMultiTermQueryMultipleLevels() throws Exception { @@ -2616,16 +2454,14 @@ public void testPostingsHighlighterMultiTermQueryMultipleLevels() throws Excepti client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource().query(boolQuery() - .should(boolQuery().mustNot(QueryBuilders.existsQuery("field1"))) - .should(matchQuery("field1", "test")) - .should(constantScoreQuery(queryStringQuery("field1:photo*")))) - .highlighter(highlight().field("field1").highlighterType(type)); - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The photography word 
will get highlighted")); - } + logger.info("--> highlighting and searching on field1"); + SearchSourceBuilder source = searchSource().query(boolQuery() + .should(boolQuery().mustNot(QueryBuilders.existsQuery("field1"))) + .should(matchQuery("field1", "test")) + .should(constantScoreQuery(queryStringQuery("field1:photo*")))) + .highlighter(highlight().field("field1")); + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The photography word will get highlighted")); } public void testPostingsHighlighterPrefixQueryWithinBooleanQuery() throws Exception { @@ -2635,14 +2471,12 @@ public void testPostingsHighlighterPrefixQueryWithinBooleanQuery() throws Except client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource() - .query(boolQuery().must(prefixQuery("field1", "photo")).should(matchQuery("field1", "test").minimumShouldMatch("0"))) - .highlighter(highlight().field("field1").highlighterType(type)); - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The photography word will get highlighted")); - } + logger.info("--> highlighting and searching on field1"); + SearchSourceBuilder source = searchSource() + .query(boolQuery().must(prefixQuery("field1", "photo")).should(matchQuery("field1", "test").minimumShouldMatch("0"))) + .highlighter(highlight().field("field1")); + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The photography word will get highlighted")); } public void testPostingsHighlighterQueryStringWithinFilteredQuery() throws Exception { @@ -2652,15 +2486,13 @@ public void testPostingsHighlighterQueryStringWithinFilteredQuery() throws Excep client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get(); refresh(); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource().query(boolQuery() - .must(queryStringQuery("field1:photo*")) - .mustNot(existsQuery("field_null"))) - .highlighter(highlight().field("field1").highlighterType(type)); - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The photography word will get highlighted")); - } + logger.info("--> highlighting and searching on field1"); + SearchSourceBuilder source = searchSource().query(boolQuery() + .must(queryStringQuery("field1:photo*")) + .mustNot(existsQuery("field_null"))) + .highlighter(highlight().field("field1")); + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The photography word will get highlighted")); } public void testPostingsHighlighterManyDocs() throws Exception { @@ -2677,25 +2509,23 @@ public void testPostingsHighlighterManyDocs() throws Exception { String prefix = randomAlphaOfLengthBetween(5, 30); prefixes.put(String.valueOf(i), prefix); indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field1", "Sentence " + prefix - + " test. 
Sentence two."); + + " test. Sentence two."); } logger.info("--> indexing docs"); indexRandom(true, indexRequestBuilders); - for (String type : UNIFIED_AND_NULL) { - logger.info("--> searching explicitly on field1 and highlighting on it"); - SearchRequestBuilder searchRequestBuilder = client().prepareSearch() - .setSize(COUNT) - .setQuery(termQuery("field1", "test")) - .highlighter(new HighlightBuilder().field("field1").highlighterType(type)); - SearchResponse searchResponse = - searchRequestBuilder.get(); - assertHitCount(searchResponse, COUNT); - assertThat(searchResponse.getHits().getHits().length, equalTo(COUNT)); - for (SearchHit hit : searchResponse.getHits()) { - String prefix = prefixes.get(hit.getId()); - assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " test.")); - } + logger.info("--> searching explicitly on field1 and highlighting on it"); + SearchRequestBuilder searchRequestBuilder = client().prepareSearch() + .setSize(COUNT) + .setQuery(termQuery("field1", "test")) + .highlighter(new HighlightBuilder().field("field1")); + SearchResponse searchResponse = + searchRequestBuilder.get(); + assertHitCount(searchResponse, COUNT); + assertThat(searchResponse.getHits().getHits().length, equalTo(COUNT)); + for (SearchHit hit : searchResponse.getHits()) { + String prefix = prefixes.get(hit.getId()); + assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " test.")); } } @@ -2741,11 +2571,6 @@ public void testFastVectorHighlighterPhraseBoost() throws Exception { phraseBoostTestCase("fvh"); } - public void testPostingsHighlighterPhraseBoost() throws Exception { - assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping())); - phraseBoostTestCase("postings"); - } - /** * Test phrase boosting over normal term matches. Note that this will never pass with the plain highlighter * because it doesn't support the concept of terms having a different weight based on position. 
@@ -2837,7 +2662,7 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti .setSource(jsonBuilder().startObject().field("text", "Arbitrary text field which will should not cause a failure").endObject()) .get(); refresh(); - String highlighterType = randomFrom("plain", "fvh", "postings", "unified"); + String highlighterType = randomFrom(ALL_TYPES); QueryBuilder query = QueryBuilders.boolQuery().should(QueryBuilders.geoBoundingBoxQuery("geo_point") .setCorners(61.10078883158897, -170.15625, -64.92354174306496, 118.47656249999999)) .should(QueryBuilders.termQuery("text", "failure")); @@ -2958,17 +2783,15 @@ public void testFunctionScoreQueryHighlight() throws Exception { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - for (String type : UNIFIED_AND_NULL) { - SearchResponse searchResponse = client().prepareSearch() - .setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))) - .highlighter(new HighlightBuilder() - .field(new Field("text")).highlighterType(type)) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("brown")); - } + SearchResponse searchResponse = client().prepareSearch() + .setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))) + .highlighter(new HighlightBuilder() + .field(new Field("text"))) + .get(); + assertHitCount(searchResponse, 1); + HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("text"); + assertThat(field.getFragments().length, equalTo(1)); + assertThat(field.getFragments()[0].string(), equalTo("brown")); } public void testFiltersFunctionScoreQueryHighlight() throws Exception { @@ -2980,18 +2803,16 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception { new FunctionScoreQueryBuilder.FilterFunctionBuilder(QueryBuilders.termQuery("enable", "yes"), new RandomScoreFunctionBuilder()); - for (String type : UNIFIED_AND_NULL) { - SearchResponse searchResponse = client().prepareSearch() - .setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"), - new FunctionScoreQueryBuilder.FilterFunctionBuilder[]{filterBuilder})) - .highlighter(new HighlightBuilder() - .field(new Field("text")).highlighterType(type)) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("brown")); - } + SearchResponse searchResponse = client().prepareSearch() + .setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"), + new FunctionScoreQueryBuilder.FilterFunctionBuilder[]{filterBuilder})) + .highlighter(new HighlightBuilder() + .field(new Field("text"))) + .get(); + assertHitCount(searchResponse, 1); + HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("text"); + assertThat(field.getFragments().length, equalTo(1)); + assertThat(field.getFragments()[0].string(), equalTo("brown")); } public void testSynonyms() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java b/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java index 316e83ad1bbcf..3e4792690ad5e 100644 --- 
a/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java +++ b/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java @@ -57,9 +57,7 @@ public class SimpleNestedIT extends ESIntegTestCase { public void testSimpleNested() throws Exception { assertAcked(prepareCreate("test") - .setSettings("index.mapping.single_type", false) - .addMapping("type1", "nested1", "type=nested") - .addMapping("type2", "nested1", "type=nested")); + .addMapping("type1", "nested1", "type=nested")); ensureGreen(); // check on no data, see it works @@ -158,10 +156,6 @@ public void testSimpleNested() throws Exception { searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)).execute().actionGet(); assertNoFailures(searchResponse); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); - - searchResponse = client().prepareSearch("test").setTypes("type1", "type2").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)).execute().actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); } public void testMultiNested() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/search/profile/ProfileResultTests.java b/core/src/test/java/org/elasticsearch/search/profile/ProfileResultTests.java index 77b41b062d3b9..5174267815b84 100644 --- a/core/src/test/java/org/elasticsearch/search/profile/ProfileResultTests.java +++ b/core/src/test/java/org/elasticsearch/search/profile/ProfileResultTests.java @@ -33,9 +33,11 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Predicate; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; public class ProfileResultTests extends ESTestCase { @@ -62,12 +64,32 @@ public static ProfileResult createTestItem(int depth) { } public void testFromXContent() throws IOException { + doFromXContentTestWithRandomFields(false); + } + + /** + * This test adds random fields and objects to the xContent rendered out to ensure we can parse it + * back to be forward compatible with additions to the xContent + */ + public void testFromXContentWithRandomFields() throws IOException { + doFromXContentTestWithRandomFields(true); + } + + private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { ProfileResult profileResult = createTestItem(2); XContentType xContentType = randomFrom(XContentType.values()); boolean humanReadable = randomBoolean(); BytesReference originalBytes = toShuffledXContent(profileResult, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + BytesReference mutated; + if (addRandomFields) { + // "breakdown" just consists of key/value pairs, we shouldn't add anything random there + Predicate excludeFilter = (s) -> s.endsWith(ProfileResult.BREAKDOWN.getPreferredName()); + mutated = insertRandomFields(xContentType, originalBytes, excludeFilter, random()); + } else { + mutated = originalBytes; + } ProfileResult parsed; - try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { 
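// advance onto the outer START_OBJECT before handing the parser to ProfileResult.fromXContent;
// when addRandomFields is true, "mutated" contains the unknown fields injected by
// insertRandomFields, so this round-trip also checks that fromXContent skips fields it does not know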
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); parsed = ProfileResult.fromXContent(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); diff --git a/core/src/test/java/org/elasticsearch/search/profile/SearchProfileShardResultsTests.java b/core/src/test/java/org/elasticsearch/search/profile/SearchProfileShardResultsTests.java index 853e7cd13a337..7bc9b18860641 100644 --- a/core/src/test/java/org/elasticsearch/search/profile/SearchProfileShardResultsTests.java +++ b/core/src/test/java/org/elasticsearch/search/profile/SearchProfileShardResultsTests.java @@ -34,10 +34,12 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Predicate; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureFieldName; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;; public class SearchProfileShardResultsTests extends ESTestCase { @@ -58,20 +60,43 @@ public static SearchProfileShardResults createTestItem() { } public void testFromXContent() throws IOException { + doFromXContentTestWithRandomFields(false); + } + + /** + * This test adds random fields and objects to the xContent rendered out to ensure we can parse it + * back to be forward compatible with additions to the xContent + */ + public void testFromXContentWithRandomFields() throws IOException { + doFromXContentTestWithRandomFields(true); + } + + private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { SearchProfileShardResults shardResult = createTestItem(); XContentType xContentType = randomFrom(XContentType.values()); boolean humanReadable = randomBoolean(); BytesReference originalBytes = toShuffledXContent(shardResult, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + BytesReference mutated; + if (addRandomFields) { + // The ProfileResults "breakdown" section just consists of key/value pairs, we shouldn't add anything random there + // also we don't want to insert into the root object here, its just the PROFILE_FIELD itself + Predicate excludeFilter = (s) -> (s.isEmpty() || s.endsWith(ProfileResult.BREAKDOWN.getPreferredName())); + mutated = insertRandomFields(xContentType, originalBytes, excludeFilter, random()); + } else { + mutated = originalBytes; + } SearchProfileShardResults parsed; - try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { ensureExpectedToken(parser.nextToken(), XContentParser.Token.START_OBJECT, parser::getTokenLocation); ensureFieldName(parser, parser.nextToken(), SearchProfileShardResults.PROFILE_FIELD); ensureExpectedToken(parser.nextToken(), XContentParser.Token.START_OBJECT, parser::getTokenLocation); parsed = SearchProfileShardResults.fromXContent(parser); + assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); assertNull(parser.nextToken()); } assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, humanReadable), xContentType); + } } diff --git a/core/src/test/java/org/elasticsearch/search/profile/query/CollectorResultTests.java 
b/core/src/test/java/org/elasticsearch/search/profile/query/CollectorResultTests.java index 8d87f193607f5..10bf8e2a30013 100644 --- a/core/src/test/java/org/elasticsearch/search/profile/query/CollectorResultTests.java +++ b/core/src/test/java/org/elasticsearch/search/profile/query/CollectorResultTests.java @@ -34,6 +34,7 @@ import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; public class CollectorResultTests extends ESTestCase { @@ -57,18 +58,30 @@ public static CollectorResult createTestItem(int depth) { } public void testFromXContent() throws IOException { + doFromXContentTestWithRandomFields(false); + } + + public void testFromXContentWithRandomFields() throws IOException { + doFromXContentTestWithRandomFields(true); + } + + private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { CollectorResult collectorResult = createTestItem(1); XContentType xContentType = randomFrom(XContentType.values()); boolean humanReadable = randomBoolean(); BytesReference originalBytes = toShuffledXContent(collectorResult, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); - - CollectorResult parsed; - try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + BytesReference mutated; + if (addRandomFields) { + mutated = insertRandomFields(xContentType, originalBytes, null, random()); + } else { + mutated = originalBytes; + } + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); - parsed = CollectorResult.fromXContent(parser); + CollectorResult parsed = CollectorResult.fromXContent(parser); assertNull(parser.nextToken()); + assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, humanReadable), xContentType); } - assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, humanReadable), xContentType); } public void testToXContent() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/core/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index 65aa5f992e635..2633cb706e0cc 100644 --- a/core/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/core/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -21,7 +21,9 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; @@ -29,19 +31,30 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; +import org.apache.lucene.queries.MinDocQuery; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import 
org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.index.query.ParsedQuery; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.internal.ScrollContext; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TestSearchContext; @@ -49,6 +62,14 @@ import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.nullValue; + public class QueryPhaseTests extends ESTestCase { private void countTestCase(Query query, IndexReader reader, boolean shouldCollect) throws Exception { @@ -66,7 +87,7 @@ protected void search(List leaves, Weight weight, Collector c } }; - final boolean rescore = QueryPhase.execute(context, contextSearcher); + final boolean rescore = QueryPhase.execute(context, contextSearcher, null); assertFalse(rescore); assertEquals(searcher.count(query), context.queryResult().topDocs().totalHits); assertEquals(shouldCollect, collected.get()); @@ -135,12 +156,12 @@ protected void search(List leaves, Weight weight, Collector c } }; - QueryPhase.execute(context, contextSearcher); + QueryPhase.execute(context, contextSearcher, null); assertEquals(0, context.queryResult().topDocs().totalHits); assertFalse(collected.get()); context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery())); - QueryPhase.execute(context, contextSearcher); + QueryPhase.execute(context, contextSearcher, null); assertEquals(0, context.queryResult().topDocs().totalHits); assertTrue(collected.get()); } @@ -159,14 +180,264 @@ protected void search(List leaves, Weight weight, Collector c } }; - QueryPhase.execute(context, contextSearcher); + QueryPhase.execute(context, contextSearcher, null); assertEquals(0, context.queryResult().topDocs().totalHits); assertFalse(collected.get()); context.minimumScore(1); - QueryPhase.execute(context, contextSearcher); + QueryPhase.execute(context, contextSearcher, null); assertEquals(0, context.queryResult().topDocs().totalHits); assertTrue(collected.get()); } + public void testInOrderScrollOptimization() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig() + .setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(100, 200); + for (int i = 0; i < numDocs; ++i) { + w.addDocument(new Document()); + } + w.close(); + final AtomicBoolean collected = new AtomicBoolean(); + IndexReader reader = DirectoryReader.open(dir); + IndexSearcher contextSearcher = new IndexSearcher(reader) { + protected void 
search(List leaves, Weight weight, Collector collector) throws IOException { + collected.set(true); + super.search(leaves, weight, collector); + } + }; + + TestSearchContext context = new TestSearchContext(null); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + ScrollContext scrollContext = new ScrollContext(); + scrollContext.lastEmittedDoc = null; + scrollContext.maxScore = Float.NaN; + scrollContext.totalHits = -1; + context.scrollContext(scrollContext); + context.setTask(new SearchTask(123L, "", "", "", null)); + context.setSize(10); + + QueryPhase.execute(context, contextSearcher, null); + assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs)); + assertTrue(collected.get()); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits(), equalTo(numDocs)); + + QueryPhase.execute(context, contextSearcher, null); + assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs)); + assertTrue(collected.get()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.terminateAfter(), equalTo(10)); + assertThat(context.queryResult().getTotalHits(), equalTo(numDocs)); + assertThat(context.queryResult().topDocs().scoreDocs[0].doc, greaterThanOrEqualTo(10)); + reader.close(); + dir.close(); + } + + public void testTerminateAfterEarlyTermination() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(100, 200); + for (int i = 0; i < numDocs; ++i) { + Document doc = new Document(); + if (randomBoolean()) { + doc.add(new StringField("foo", "bar", Store.NO)); + } + if (randomBoolean()) { + doc.add(new StringField("foo", "baz", Store.NO)); + } + doc.add(new NumericDocValuesField("rank", numDocs - i)); + w.addDocument(doc); + } + w.close(); + TestSearchContext context = new TestSearchContext(null); + context.setTask(new SearchTask(123L, "", "", "", null)); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + context.terminateAfter(1); + + final AtomicBoolean collected = new AtomicBoolean(); + final IndexReader reader = DirectoryReader.open(dir); + IndexSearcher contextSearcher = new IndexSearcher(reader) { + protected void search(List leaves, Weight weight, Collector collector) throws IOException { + collected.set(true); + super.search(leaves, weight, collector); + } + }; + + { + context.setSize(1); + QueryPhase.execute(context, contextSearcher, null); + assertTrue(collected.get()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); + + context.setSize(0); + QueryPhase.execute(context, contextSearcher, null); + assertTrue(collected.get()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); + } + + { + context.setSize(1); + QueryPhase.execute(context, contextSearcher, null); + assertTrue(collected.get()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); + } + { + context.setSize(1); + BooleanQuery bq = new BooleanQuery.Builder() + 
.add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD) + .add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD) + .build(); + context.parsedQuery(new ParsedQuery(bq)); + collected.set(false); + QueryPhase.execute(context, contextSearcher, null); + assertTrue(collected.get()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); + + context.setSize(0); + context.parsedQuery(new ParsedQuery(bq)); + collected.set(false); + QueryPhase.execute(context, contextSearcher, null); + assertTrue(collected.get()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); + } + { + context.setSize(1); + collected.set(false); + TotalHitCountCollector collector = new TotalHitCountCollector(); + context.queryCollectors().put(TotalHitCountCollector.class, collector); + QueryPhase.execute(context, contextSearcher, null); + assertTrue(collected.get()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); + } + { + context.setSize(0); + collected.set(false); + TotalHitCountCollector collector = new TotalHitCountCollector(); + context.queryCollectors().put(TotalHitCountCollector.class, collector); + QueryPhase.execute(context, contextSearcher, null); + assertTrue(collected.get()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); + assertThat(collector.getTotalHits(), equalTo(1)); + } + + reader.close(); + dir.close(); + } + + public void testIndexSortingEarlyTermination() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig() + .setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(100, 200); + for (int i = 0; i < numDocs; ++i) { + Document doc = new Document(); + if (randomBoolean()) { + doc.add(new StringField("foo", "bar", Store.NO)); + } + if (randomBoolean()) { + doc.add(new StringField("foo", "baz", Store.NO)); + } + doc.add(new NumericDocValuesField("rank", numDocs - i)); + w.addDocument(doc); + } + w.close(); + + TestSearchContext context = new TestSearchContext(null); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + context.setSize(1); + context.setTask(new SearchTask(123L, "", "", "", null)); + context.sort(new SortAndFormats(sort, new DocValueFormat[] {DocValueFormat.RAW})); + + final AtomicBoolean collected = new AtomicBoolean(); + final IndexReader reader = DirectoryReader.open(dir); + IndexSearcher contextSearcher = new IndexSearcher(reader) { + protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException { + collected.set(true); + super.search(leaves, weight, collector); + } + }; + QueryPhase.execute(context, contextSearcher, sort); + assertTrue(collected.get()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs)); + assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); +
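// the one returned hit is a FieldDoc carrying the sort values: ranks run from 1 to numDocs
// and the index sort is ascending on "rank", so the top document's first sort value must be
// the minimum rank even though collection terminated early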
+        assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class));
+        FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[0];
+        assertThat(fieldDoc.fields[0], equalTo(1));
+
+        {
+            collected.set(false);
+            context.parsedPostFilter(new ParsedQuery(new MinDocQuery(1)));
+            QueryPhase.execute(context, contextSearcher, sort);
+            assertTrue(collected.get());
+            assertTrue(context.queryResult().terminatedEarly());
+            assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs - 1));
+            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
+            assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class));
+            fieldDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[0];
+            assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+            context.parsedPostFilter(null);
+
+            final TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
+            context.queryCollectors().put(TotalHitCountCollector.class, totalHitCountCollector);
+            collected.set(false);
+            QueryPhase.execute(context, contextSearcher, sort);
+            assertTrue(collected.get());
+            assertTrue(context.queryResult().terminatedEarly());
+            assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs));
+            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
+            assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class));
+            fieldDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[0];
+            assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+            assertThat(totalHitCountCollector.getTotalHits(), equalTo(numDocs));
+            context.queryCollectors().clear();
+        }
+
+        {
+            collected.set(false);
+            context.trackTotalHits(false);
+            QueryPhase.execute(context, contextSearcher, sort);
+            assertTrue(collected.get());
+            assertTrue(context.queryResult().terminatedEarly());
+            assertThat(context.queryResult().topDocs().totalHits, lessThan(numDocs));
+            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
+            assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class));
+            fieldDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[0];
+            assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+
+            final TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
+            context.queryCollectors().put(TotalHitCountCollector.class, totalHitCountCollector);
+            collected.set(false);
+            QueryPhase.execute(context, contextSearcher, sort);
+            assertTrue(collected.get());
+            assertTrue(context.queryResult().terminatedEarly());
+            assertThat(context.queryResult().topDocs().totalHits, lessThan(numDocs));
+            assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1));
+            assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class));
+            fieldDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[0];
+            assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+            assertThat(totalHitCountCollector.getTotalHits(), equalTo(numDocs));
+        }
+        reader.close();
+        dir.close();
+    }
 }
diff --git a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
index 58c0bf82e98d1..f22ec392b9953 100644
--- a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
+++ b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
@@ -120,7 +120,6 @@ public void testSimpleQueryString() throws ExecutionException, InterruptedException {
         assertSearchHits(searchResponse, "5", "6");
     }
 
-    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/23966")
     public void testSimpleQueryStringMinimumShouldMatch() throws Exception {
         createIndex("test");
         ensureGreen("test");
diff --git a/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java
index f63f13b6dd24b..c4bb4a811a51b 100644
--- a/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java
@@ -30,7 +30,9 @@
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchContextException;
 import org.elasticsearch.search.rescore.QueryRescorerBuilder;
+import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.ESIntegTestCase;
 
 import java.util.ArrayList;
@@ -51,6 +53,8 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
 
 public class SimpleSearchIT extends ESIntegTestCase {
 
@@ -285,7 +289,50 @@ public void testSimpleTerminateAfterCount() throws Exception {
             .setTerminateAfter(2 * max).execute().actionGet();
         assertHitCount(searchResponse, max);
-        assertFalse(searchResponse.isTerminatedEarly());
+        assertNull(searchResponse.isTerminatedEarly());
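+        // the flag is now a tri-state Boolean: it stays null when the
+        // terminate_after limit never kicked in, instead of reporting false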
     }
 
+    public void testSimpleIndexSortEarlyTerminate() throws Exception {
+        prepareCreate("test")
+            .setSettings(Settings.builder()
+                .put(SETTING_NUMBER_OF_SHARDS, 1)
+                .put(SETTING_NUMBER_OF_REPLICAS, 0)
+                .put("index.sort.field", "rank")
+            )
+            .addMapping("type1", "rank", "type=integer")
+            .get();
+        ensureGreen();
+        int max = randomIntBetween(3, 29);
+        List<IndexRequestBuilder> docbuilders = new ArrayList<>(max);
+
+        for (int i = max - 1; i >= 0; i--) {
+            String id = String.valueOf(i);
+            docbuilders.add(client().prepareIndex("test", "type1", id).setSource("rank", i));
+        }
+
+        indexRandom(true, docbuilders);
+        ensureGreen();
+        refresh();
+
+        SearchResponse searchResponse;
+        boolean hasEarlyTerminated = false;
+        for (int i = 1; i < max; i++) {
+            searchResponse = client().prepareSearch("test")
+                .addDocValueField("rank")
+                .setTrackTotalHits(false)
+                .addSort("rank", SortOrder.ASC)
+                .setSize(i).execute().actionGet();
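+            // hit counting is disabled above, so the response reports -1
+            // instead of a real total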
+            assertThat(searchResponse.getHits().getTotalHits(), equalTo(-1L));
+            if (searchResponse.isTerminatedEarly() != null) {
+                assertTrue(searchResponse.isTerminatedEarly());
+                hasEarlyTerminated = true;
+            }
+            for (int j = 0; j < i; j++) {
+                assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(),
+                    equalTo((long) j));
+            }
+        }
+        assertTrue(hasEarlyTerminated);
     }
 
     public void testInsaneFromAndSize() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
index 2ed8857f1220d..121085f34d79d 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
@@ -158,7 +158,7 @@ public void testParseJson() throws IOException {
             "\"_script\" : {\n" +
             "\"type\" : \"number\",\n" +
             "\"script\" : {\n" +
-            "\"inline\": \"doc['field_name'].value * factor\",\n" +
+            "\"source\": \"doc['field_name'].value * factor\",\n" +
             "\"params\" : {\n" +
             "\"factor\" : 1.1\n" +
             "}\n" +
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java
index 18a24e0011679..df4c6898cc7d6 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java
@@ -36,8 +36,10 @@
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.function.Predicate;
 
 import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
+import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
 
 public class CompletionSuggestionOptionTests extends ESTestCase {
@@ -67,17 +69,31 @@ public static Option createTestItem() {
     }
 
     public void testFromXContent() throws IOException {
+        doTestFromXContent(false);
+    }
+
+    public void testFromXContentWithRandomFields() throws IOException {
+        doTestFromXContent(true);
+    }
+
+    private void doTestFromXContent(boolean addRandomFields) throws IOException {
         Option option = createTestItem();
         XContentType xContentType = randomFrom(XContentType.values());
         boolean humanReadable = randomBoolean();
         BytesReference originalBytes = toShuffledXContent(option, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);
-        if (randomBoolean()) {
-            try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
-                originalBytes = shuffleXContent(parser, randomBoolean()).bytes();
-            }
+        BytesReference mutated;
+        if (addRandomFields) {
+            // "contexts" is an object made of key/array pairs, so we must not insert random fields there;
+            // this option can also embed inner search hits, so a couple of other paths where random
+            // fields would break parsing need to be excluded as well
+            Predicate<String> excludeFilter = (path) -> (path.endsWith(CompletionSuggestion.Entry.Option.CONTEXTS.getPreferredName())
+                || path.endsWith("highlight") || path.endsWith("fields") || path.contains("_source") || path.contains("inner_hits"));
+            mutated = insertRandomFields(xContentType, originalBytes, excludeFilter, random());
+        } else {
+            mutated = originalBytes;
         }
         Option parsed;
-        try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
+        try (XContentParser parser = createParser(xContentType.xContent(), mutated)) {
             parsed = Option.fromXContent(parser);
             assertNull(parser.nextToken());
         }
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java b/core/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java
index c565836adb6ac..8c938caa479e9 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java
@@ -90,7 +90,7 @@ public void testFromXContent() throws IOException {
         Suggest suggest = createTestItem();
         XContentType xContentType = randomFrom(XContentType.values());
         boolean humanReadable = randomBoolean();
-        BytesReference originalBytes = toXContent(suggest, xContentType, params, humanReadable);
+        BytesReference originalBytes = toShuffledXContent(suggest, xContentType, params, humanReadable);
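+        // shuffling the rendered fields before parsing back checks that
+        // fromXContent does not rely on a fixed field order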
         Suggest parsed;
         try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
             ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/SuggestionEntryTests.java b/core/src/test/java/org/elasticsearch/search/suggest/SuggestionEntryTests.java
index 770fd2f6e6c6a..46971a5537b26 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/SuggestionEntryTests.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/SuggestionEntryTests.java
@@ -36,10 +36,12 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.function.Function;
+import java.util.function.Predicate;
 import java.util.function.Supplier;
 
 import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
 import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
 
 public class SuggestionEntryTests extends ESTestCase {
@@ -80,15 +82,35 @@ public static Entry createTestItem(Class
         return entry;
     }
 
-    @SuppressWarnings("unchecked")
     public void testFromXContent() throws IOException {
+        doTestFromXContent(false);
+    }
+
+    public void testFromXContentWithRandomFields() throws IOException {
+        doTestFromXContent(true);
+    }
+
+    @SuppressWarnings("unchecked")
+    private void doTestFromXContent(boolean addRandomFields) throws IOException {
         for (Class entryType : ENTRY_PARSERS.keySet()) {
             Entry