Skip to content

Commit

Permalink
Merge remote-tracking branch 'elastic/master' into retention-lease-ccr
Browse files Browse the repository at this point in the history
* elastic/master:
  Ensure global test seed is used for all random testing tasks (elastic#38991)
  re-mutes SmokeTestWatcherWithSecurityIT (elastic#38995)
  Rollup jobs should be cleaned up before indices are deleted (elastic#38930)
  relax ML Info Docs expected response (elastic#38993)
  Re-enable single node tests (elastic#38852)
  ClusterClientIT refactor (elastic#38872)
  Fix typo in Index API doc
  Edits to text & formatting in Term Suggester doc (elastic#38963) (elastic#38989)
  Migrate Streamable to Writeable for WatchStatus (elastic#37390)
  • Loading branch information
jasontedor committed Feb 16, 2019
2 parents fc9a300 + 05a3108 commit 2c77ad4
Show file tree
Hide file tree
Showing 14 changed files with 93 additions and 87 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,14 @@ package com.carrotsearch.gradle.junit4
import com.carrotsearch.ant.tasks.junit4.JUnit4
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.tasks.TaskContainer

class RandomizedTestingPlugin implements Plugin<Project> {

void apply(Project project) {
setupSeed(project)
String seed = setupSeed(project)
createUnitTestTask(project.tasks)
configureAnt(project.ant)
configureAnt(project.ant, seed)
}

/**
Expand All @@ -21,12 +20,12 @@ class RandomizedTestingPlugin implements Plugin<Project> {
* outcome of subsequent runs. Pinning the seed up front like this makes
* the reproduction line from one run be useful on another run.
*/
static void setupSeed(Project project) {
static String setupSeed(Project project) {
if (project.rootProject.ext.has('testSeed')) {
/* Skip this if we've already pinned the testSeed. It is important
* that this checks the rootProject so that we know we've only ever
* initialized one time. */
return
return project.rootProject.ext.testSeed
}
String testSeed = System.getProperty('tests.seed')
if (testSeed == null) {
Expand All @@ -39,6 +38,8 @@ class RandomizedTestingPlugin implements Plugin<Project> {
project.rootProject.subprojects {
project.ext.testSeed = testSeed
}

return testSeed
}

static void createUnitTestTask(TaskContainer tasks) {
Expand All @@ -52,7 +53,8 @@ class RandomizedTestingPlugin implements Plugin<Project> {
}
}

static void configureAnt(AntBuilder ant) {
/**
 * Wires the randomized-testing pieces into Ant: registers the junit4 task
 * definition and publishes the pinned test seed as the 'tests.seed' Ant
 * property. The two calls are independent of each other.
 */
static void configureAnt(AntBuilder ant, String seed) {
    ant.properties['tests.seed'] = seed
    ant.project.addTaskDefinition('junit4:junit4', JUnit4.class)
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -165,10 +165,8 @@ public void testClusterHealthGreen() throws IOException {
assertThat(response.isTimedOut(), equalTo(false));
assertThat(response.status(), equalTo(RestStatus.OK));
assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertNoIndices(response);
}

@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35450")
public void testClusterHealthYellowClusterLevel() throws IOException {
createIndex("index", Settings.EMPTY);
createIndex("index2", Settings.EMPTY);
Expand All @@ -178,15 +176,21 @@ public void testClusterHealthYellowClusterLevel() throws IOException {

logger.info("Shard stats\n{}", EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/shards")).getEntity()));
assertYellowShards(response);
assertThat(response.getIndices().size(), equalTo(0));
}

@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35450")
public void testClusterHealthYellowIndicesLevel() throws IOException {
createIndex("index", Settings.EMPTY);
createIndex("index2", Settings.EMPTY);
ClusterHealthRequest request = new ClusterHealthRequest();
String firstIndex = "index";
String secondIndex = "index2";
// including another index that we do not assert on, to ensure that we are not
// accidentally asserting on entire cluster state
String ignoredIndex = "tasks";
createIndex(firstIndex, Settings.EMPTY);
createIndex(secondIndex, Settings.EMPTY);
if (randomBoolean()) {
createIndex(ignoredIndex, Settings.EMPTY);
}
ClusterHealthRequest request = new ClusterHealthRequest(firstIndex, secondIndex);
request.timeout("5s");
request.level(ClusterHealthRequest.Level.INDICES);
ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync);
Expand All @@ -212,11 +216,9 @@ private static void assertYellowShards(ClusterHealthResponse response) {
assertThat(response.getDelayedUnassignedShards(), equalTo(0));
assertThat(response.getInitializingShards(), equalTo(0));
assertThat(response.getUnassignedShards(), equalTo(2));
assertThat(response.getActiveShardsPercent(), equalTo(50d));
}


@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35450")

public void testClusterHealthYellowSpecificIndex() throws IOException {
createIndex("index", Settings.EMPTY);
createIndex("index2", Settings.EMPTY);
Expand All @@ -236,7 +238,6 @@ public void testClusterHealthYellowSpecificIndex() throws IOException {
assertThat(response.getDelayedUnassignedShards(), equalTo(0));
assertThat(response.getInitializingShards(), equalTo(0));
assertThat(response.getUnassignedShards(), equalTo(1));
assertThat(response.getActiveShardsPercent(), equalTo(50d));
assertThat(response.getIndices().size(), equalTo(1));
Map.Entry<String, ClusterIndexHealth> index = response.getIndices().entrySet().iterator().next();
assertYellowIndex(index.getKey(), index.getValue(), false);
Expand Down Expand Up @@ -272,7 +273,19 @@ private static void assertYellowShard(int shardId, ClusterShardHealth shardHealt
assertThat(shardHealth.getRelocatingShards(), equalTo(0));
}

/**
 * Asserts that a cluster health response describes a cluster holding no
 * indices: the per-index map is empty and every shard counter is zero.
 * The node counts are asserted to be exactly 1, so this helper expects a
 * single-node cluster.
 */
private static void assertNoIndices(ClusterHealthResponse response) {
assertThat(response.getIndices(), equalTo(emptyMap()));
assertThat(response.getActivePrimaryShards(), equalTo(0));
assertThat(response.getNumberOfDataNodes(), equalTo(1));
assertThat(response.getNumberOfNodes(), equalTo(1));
assertThat(response.getActiveShards(), equalTo(0));
assertThat(response.getDelayedUnassignedShards(), equalTo(0));
assertThat(response.getInitializingShards(), equalTo(0));
assertThat(response.getUnassignedShards(), equalTo(0));
}

public void testClusterHealthNotFoundIndex() throws IOException {
createIndex("index", Settings.EMPTY);
ClusterHealthRequest request = new ClusterHealthRequest("notexisted-index");
request.timeout("5s");
ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync);
Expand All @@ -284,15 +297,4 @@ public void testClusterHealthNotFoundIndex() throws IOException {
assertNoIndices(response);
}

/**
 * Asserts that a cluster health response describes a cluster holding no
 * indices: the per-index map is empty, every shard counter is zero, and the
 * active-shards percentage is the degenerate 100%. Node counts are asserted
 * to be exactly 1, so this helper expects a single-node cluster.
 */
private static void assertNoIndices(ClusterHealthResponse response) {
assertThat(response.getIndices(), equalTo(emptyMap()));
assertThat(response.getActivePrimaryShards(), equalTo(0));
assertThat(response.getNumberOfDataNodes(), equalTo(1));
assertThat(response.getNumberOfNodes(), equalTo(1));
assertThat(response.getActiveShards(), equalTo(0));
assertThat(response.getDelayedUnassignedShards(), equalTo(0));
assertThat(response.getInitializingShards(), equalTo(0));
assertThat(response.getUnassignedShards(), equalTo(0));
assertThat(response.getActiveShardsPercent(), equalTo(100d));
}
}
2 changes: 1 addition & 1 deletion docs/reference/docs/index_.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ exist, and applies any <<indices-templates,index templates>> that are
configured. The index operation also creates a dynamic mapping if one does not
already exist. By default, new fields and objects will automatically be added
to the mapping definition if needed. Check out the <<mapping,mapping>> section
for more information on mapping definitions, and the the
for more information on mapping definitions, and the
<<indices-put-mapping,put mapping>> API for information about updating mappings
manually.

Expand Down
2 changes: 1 addition & 1 deletion docs/reference/ml/apis/get-ml-info.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -58,4 +58,4 @@ This is a possible response:
"limits" : { }
}
----
// TESTRESPONSE
// TESTRESPONSE[s/"upgrade_mode": false/"upgrade_mode": $body.upgrade_mode/]
1 change: 1 addition & 0 deletions docs/reference/rollup/apis/stop-job.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ POST _rollup/job/sensor/_stop
--------------------------------------------------
// CONSOLE
// TEST[setup:sensor_started_rollup_job]
// TEST[s/_stop/_stop?wait_for_completion=true&timeout=10s/]

Which will return the response:

Expand Down
36 changes: 18 additions & 18 deletions docs/reference/search/suggesters/term-suggest.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ doesn't take the query into account that is part of request.

`field`::
The field to fetch the candidate suggestions from. This is
an required option that either needs to be set globally or per
a required option that either needs to be set globally or per
suggestion.

`analyzer`::
Expand Down Expand Up @@ -54,17 +54,17 @@ doesn't take the query into account that is part of request.

[horizontal]
`lowercase_terms`::
Lower cases the suggest text terms after text analysis.
Lowercases the suggest text terms after text analysis.

`max_edits`::
The maximum edit distance candidate suggestions can
have in order to be considered as a suggestion. Can only be a value
between 1 and 2. Any other value result in an bad request error being
between 1 and 2. Any other value results in a bad request error being
thrown. Defaults to 2.

`prefix_length`::
The number of minimal prefix characters that must
match in order be a candidate suggestions. Defaults to 1. Increasing
match in order be a candidate for suggestions. Defaults to 1. Increasing
this number improves spellcheck performance. Usually misspellings don't
occur in the beginning of terms. (Old name "prefix_len" is deprecated)

Expand All @@ -85,7 +85,7 @@ doesn't take the query into account that is part of request.

`max_inspections`::
A factor that is used to multiply with the
`shards_size` in order to inspect more candidate spell corrections on
`shards_size` in order to inspect more candidate spelling corrections on
the shard level. Can improve accuracy at the cost of performance.
Defaults to 5.

Expand All @@ -94,29 +94,29 @@ doesn't take the query into account that is part of request.
suggestion should appear in. This can be specified as an absolute number
or as a relative percentage of number of documents. This can improve
quality by only suggesting high frequency terms. Defaults to 0f and is
not enabled. If a value higher than 1 is specified then the number
not enabled. If a value higher than 1 is specified, then the number
cannot be fractional. The shard level document frequencies are used for
this option.

`max_term_freq`::
The maximum threshold in number of documents a
The maximum threshold in number of documents in which a
suggest text token can exist in order to be included. Can be a relative
percentage number (e.g 0.4) or an absolute number to represent document
frequencies. If an value higher than 1 is specified then fractional can
percentage number (e.g., 0.4) or an absolute number to represent document
frequencies. If a value higher than 1 is specified, then fractional can
not be specified. Defaults to 0.01f. This can be used to exclude high
frequency terms from being spellchecked. High frequency terms are
usually spelled correctly on top of this also improves the spellcheck
performance. The shard level document frequencies are used for this
option.
frequency terms -- which are usually spelled correctly -- from being spellchecked.
This also improves the spellcheck performance. The shard level document frequencies
are used for this option.

`string_distance`::
Which string distance implementation to use for comparing how similar
suggested terms are. Five possible values can be specified:
`internal` - The default based on damerau_levenshtein but highly optimized

** `internal`: The default based on damerau_levenshtein but highly optimized
for comparing string distance for terms inside the index.
`damerau_levenshtein` - String distance algorithm based on
** `damerau_levenshtein`: String distance algorithm based on
Damerau-Levenshtein algorithm.
`levenshtein` - String distance algorithm based on Levenshtein edit distance
** `levenshtein`: String distance algorithm based on Levenshtein edit distance
algorithm.
`jaro_winkler` - String distance algorithm based on Jaro-Winkler algorithm.
`ngram` - String distance algorithm based on character n-grams.
** `jaro_winkler`: String distance algorithm based on Jaro-Winkler algorithm.
** `ngram`: String distance algorithm based on character n-grams.
Original file line number Diff line number Diff line change
Expand Up @@ -458,6 +458,15 @@ protected boolean preserveILMPoliciesUponCompletion() {
}

private void wipeCluster() throws Exception {

// Cleanup rollup before deleting indices. A rollup job might have bulks in-flight,
// so we need to fully shut them down first otherwise a job might stall waiting
// for a bulk to finish against a non-existing index (and then fail tests)
if (hasXPack && false == preserveRollupJobsUponCompletion()) {
wipeRollupJobs();
waitForPendingRollupTasks();
}

if (preserveIndicesUponCompletion() == false) {
// wipe indices
try {
Expand Down Expand Up @@ -505,11 +514,6 @@ private void wipeCluster() throws Exception {
wipeClusterSettings();
}

if (hasXPack && false == preserveRollupJobsUponCompletion()) {
wipeRollupJobs();
waitForPendingRollupTasks();
}

if (hasXPack && false == preserveILMPoliciesUponCompletion()) {
deleteAllPolicies();
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ public WatchStatus getStatus() {
@Override
public void readFrom(StreamInput in) throws IOException {
// Superclass fields first, then an optional WatchStatus guarded by a
// presence flag; the read order must mirror the corresponding writeTo.
super.readFrom(in);
status = in.readBoolean() ? new WatchStatus(in) : null;
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ public WatchStatus getStatus() {
@Override
public void readFrom(StreamInput in) throws IOException {
// Superclass fields first, then an optional WatchStatus guarded by a
// presence flag; the read order must mirror the corresponding writeTo.
super.readFrom(in);
status = in.readBoolean() ? new WatchStatus(in) : null;
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ public void readFrom(StreamInput in) throws IOException {
id = in.readString();
found = in.readBoolean();
if (found) {
status = WatchStatus.read(in);
status = new WatchStatus(in);
source = XContentSource.readFrom(in);
version = in.readZLong();
seqNo = in.readZLong();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
Expand All @@ -36,7 +37,7 @@
import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.writeDate;
import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.writeOptionalDate;

public class WatchStatus implements ToXContentObject, Streamable {
public class WatchStatus implements ToXContentObject, Streamable, Writeable {

public static final String INCLUDE_STATE = "include_state";

Expand All @@ -49,8 +50,26 @@ public class WatchStatus implements ToXContentObject, Streamable {
@Nullable private Map<String, String> headers;
private Map<String, ActionStatus> actions;

// for serialization
private WatchStatus() {
/**
 * Reads a WatchStatus from the given stream (Writeable-style constructor).
 * The field order here is the wire format and must stay in lockstep with
 * the matching writeTo implementation: version, two optional dates, the
 * action-status map, state, then two flag-guarded optionals.
 *
 * @param in the stream to read from
 * @throws IOException if reading from the stream fails
 */
public WatchStatus(StreamInput in) throws IOException {
version = in.readLong();
lastChecked = readOptionalDate(in);
lastMetCondition = readOptionalDate(in);
// Length-prefixed map of action id -> ActionStatus, stored immutably.
int count = in.readInt();
Map<String, ActionStatus> actions = new HashMap<>(count);
for (int i = 0; i < count; i++) {
actions.put(in.readString(), ActionStatus.readFrom(in));
}
this.actions = unmodifiableMap(actions);
// State is a (active flag, UTC timestamp) pair encoded as boolean + epoch millis.
state = new State(in.readBoolean(), Instant.ofEpochMilli(in.readLong()).atZone(ZoneOffset.UTC));
// Optional execution state, guarded by a presence flag.
boolean executionStateExists = in.readBoolean();
if (executionStateExists) {
executionState = ExecutionState.resolve(in.readString());
}
// Optional headers map; absent means empty rather than null.
if (in.readBoolean()) {
headers = in.readMap(StreamInput::readString, StreamInput::readString);
} else {
headers = Collections.emptyMap();
}
}

public WatchStatus(ZonedDateTime now, Map<String, ActionStatus> actions) {
Expand Down Expand Up @@ -222,31 +241,7 @@ public void writeTo(StreamOutput out) throws IOException {

@Override
public void readFrom(StreamInput in) throws IOException {
version = in.readLong();
lastChecked = readOptionalDate(in);
lastMetCondition = readOptionalDate(in);
int count = in.readInt();
Map<String, ActionStatus> actions = new HashMap<>(count);
for (int i = 0; i < count; i++) {
actions.put(in.readString(), ActionStatus.readFrom(in));
}
this.actions = unmodifiableMap(actions);
state = new State(in.readBoolean(), Instant.ofEpochMilli(in.readLong()).atZone(ZoneOffset.UTC));
boolean executionStateExists = in.readBoolean();
if (executionStateExists) {
executionState = ExecutionState.resolve(in.readString());
}
if (in.readBoolean()) {
headers = in.readMap(StreamInput::readString, StreamInput::readString);
} else {
headers = Collections.emptyMap();
}
}

public static WatchStatus read(StreamInput in) throws IOException {
WatchStatus status = new WatchStatus();
status.readFrom(in);
return status;
throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
}

@Override
Expand Down
Loading

0 comments on commit 2c77ad4

Please sign in to comment.