Merge remote-tracking branch 'elastic/master' into retention-lease-unfollow

* elastic/master: (37 commits)
  Enable test logging for TransformIntegrationTests#testSearchTransform.
  stronger wording for ilm+rollover in docs (elastic#39159)
  Mute SingleNodeTests (elastic#39156)
  AwaitsFix XPackUsageIT#testXPackCcrUsage.
  Resolve concurrency with watcher trigger service (elastic#39092)
  Fix median calculation in MedianAbsoluteDeviationAggregatorTests (elastic#38979)
  [DOCS] Edits the remote clusters documentation (elastic#38996)
  add version 6.6.2
  Revert "Mute failing test 20_mix_typless_typefull (elastic#38781)" (elastic#38912)
  Rebuild remote connections on profile changes (elastic#37678)
  Document 'max_size' parameter as shard size for rollover (elastic#38750)
  Add some missing toString() implementations (elastic#39124)
  Migrate Streamable to Writeable for cluster block package (elastic#37391)
  fix RethrottleTests retry (elastic#38978)
  Disable date parsing test in non english locale (elastic#39052)
  Remove BCryptTests (elastic#39098)
  [ML] Stop the ML memory tracker before closing node (elastic#39111)
  Allow retention lease operations under blocks (elastic#39089)
  ML refactor DatafeedsConfig(Update) so defaults are not populated in queries or aggs (elastic#38822)
  Fix retention leases sync on recovery test
  ...
jasontedor committed Feb 20, 2019
2 parents fd795a2 + 5eef4ad commit f6ecbf2
Showing 89 changed files with 1,335 additions and 688 deletions.
@@ -129,6 +129,7 @@ public void setDistribution(Distribution distribution) {
public void freeze() {
    requireNonNull(distribution, "null distribution passed when configuring test cluster `" + this + "`");
    requireNonNull(version, "null version passed when configuring test cluster `" + this + "`");
+    requireNonNull(javaHome, "null javaHome passed when configuring test cluster `" + this + "`");
    logger.info("Locking configuration of `{}`", this);
    configurationFrozen.set(true);
}
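The new `requireNonNull` guard makes `javaHome` a hard requirement once the cluster configuration is frozen. As an illustration of the underlying pattern — validate required fields once, then reject further mutation — here is a minimal sketch assuming only the JDK; the class and field names are invented for the example, not taken from the Elasticsearch build code:

[source,java]
----
import java.util.concurrent.atomic.AtomicBoolean;
import static java.util.Objects.requireNonNull;

// Minimal sketch of a freezable configuration object: setters are rejected
// once freeze() has validated all required fields.
class FreezableConfig {
    private final AtomicBoolean configurationFrozen = new AtomicBoolean(false);
    private String javaHome;

    void setJavaHome(String javaHome) {
        if (configurationFrozen.get()) {
            throw new IllegalStateException("configuration is frozen");
        }
        this.javaHome = javaHome;
    }

    void freeze() {
        requireNonNull(javaHome, "null javaHome passed when configuring `" + this + "`");
        configurationFrozen.set(true);
    }
}
----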
@@ -204,16 +205,7 @@ private void startElasticsearchProcess(Path distroArtifact) {
Map<String, String> environment = processBuilder.environment();
// Don't inherit anything from the environment, as that would hurt reproducibility
environment.clear();
-if (javaHome != null) {
-    environment.put("JAVA_HOME", getJavaHome().getAbsolutePath());
-} else if (System.getenv().get("JAVA_HOME") != null) {
-    logger.warn("{}: No java home configured will use it from environment: {}",
-        this, System.getenv().get("JAVA_HOME")
-    );
-    environment.put("JAVA_HOME", System.getenv().get("JAVA_HOME"));
-} else {
-    logger.warn("{}: No javaHome configured, will rely on default java detection", this);
-}
+environment.put("JAVA_HOME", getJavaHome().getAbsolutePath());
environment.put("ES_PATH_CONF", configFile.getParent().toAbsolutePath().toString());
environment.put("ES_JAVA_OPTIONS", "-Xms512m -Xmx512m");
// don't buffer all in memory, make sure we don't block on the default pipes
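With the fallback branches removed, the test cluster always launches Elasticsearch with an explicitly constructed environment. The technique generalizes; a small sketch assuming only the JDK, with placeholder paths:

[source,java]
----
import java.io.IOException;
import java.util.Map;

// Sketch: start a child process with a fully controlled environment rather
// than inheriting the parent's variables, for reproducibility.
public class ControlledEnvironment {
    public static void main(String[] args) throws IOException {
        // Absolute path, so resolution does not depend on the cleared PATH.
        ProcessBuilder pb = new ProcessBuilder("/opt/jdk/bin/java", "-version"); // placeholder
        Map<String, String> env = pb.environment();
        env.clear(); // drop everything inherited from the parent
        env.put("JAVA_HOME", "/opt/jdk"); // placeholder
        pb.inheritIO(); // don't buffer output in memory or block on the default pipes
        pb.start();
    }
}
----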
@@ -1261,7 +1261,8 @@ public void testGetAlias() throws IOException {
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
    highLevelClient().indices()::getAliasAsync);

-assertThat(getAliasesResponse.getAliases().size(), equalTo(3));
+assertThat("Unexpected number of aliases, got: " + getAliasesResponse.getAliases().toString(),
+    getAliasesResponse.getAliases().size(), equalTo(3));
assertThat(getAliasesResponse.getAliases().get("index1").size(), equalTo(1));
AliasMetaData aliasMetaData1 = getAliasesResponse.getAliases().get("index1").iterator().next();
assertThat(aliasMetaData1, notNullValue());
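The reworked assertion uses Hamcrest's three-argument `assertThat(String reason, T actual, Matcher)` overload, which prefixes the reason string to the mismatch description so a failure shows the actual alias map. A self-contained sketch with invented map contents:

[source,java]
----
import java.util.Map;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;

// Sketch of the failure-message pattern: on mismatch, the reason string
// (including the map's toString()) is printed before the expected/actual diff.
public class AssertReasonExample {
    public static void main(String[] args) {
        Map<String, String> aliases = Map.of("index1", "alias1", "index2", "alias2");
        assertThat("Unexpected number of aliases, got: " + aliases,
            aliases.size(), equalTo(2));
        System.out.println("assertion passed");
    }
}
----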
4 changes: 1 addition & 3 deletions distribution/docker/src/docker/Dockerfile
@@ -23,8 +23,6 @@ RUN curl --retry 8 -s ${jdkUrl} | tar -C /opt -zxf -
# REF: https://github.com/elastic/elasticsearch-docker/issues/171
RUN ln -sf /etc/pki/ca-trust/extracted/java/cacerts /opt/jdk-${jdkVersion}/lib/security/cacerts

-RUN yum install -y unzip which
-
RUN groupadd -g 1000 elasticsearch && \
    adduser -u 1000 -g 1000 -d /usr/share/elasticsearch elasticsearch

@@ -51,7 +49,7 @@ ENV JAVA_HOME /opt/jdk-${jdkVersion}
COPY --from=builder /opt/jdk-${jdkVersion} /opt/jdk-${jdkVersion}

RUN yum update -y && \
-    yum install -y nc unzip wget which && \
+    yum install -y nc && \
    yum clean all

RUN groupadd -g 1000 elasticsearch && \
218 changes: 133 additions & 85 deletions docs/plugins/repository-s3.asciidoc

Large diffs are not rendered by default.

@@ -1,8 +1,6 @@
[[analysis-synonym-graph-tokenfilter]]
=== Synonym Graph Token Filter

-beta[]
-
The `synonym_graph` token filter allows you to easily handle synonyms,
including multi-word synonyms, correctly during the analysis process.

@@ -187,3 +185,8 @@ multiple versions of a token may choose which version of the token to emit when
parsing synonyms, e.g. `asciifolding` will only produce the folded version of the
token. Others, e.g. `multiplexer`, `word_delimiter_graph` or `ngram` will throw an
error.

+WARNING: Synonym rules should not contain words that are removed by a filter
+that appears later in the chain (a `stop` filter, for instance). Removing a
+term from a synonym rule breaks the matching at query time.

2 changes: 1 addition & 1 deletion docs/reference/ilm/policy-definitions.asciidoc
@@ -390,7 +390,7 @@ existing index meets one of the rollover conditions.
[options="header"]
|======
| Name | Required | Default | Description
| `max_size` | no | - | max index storage size.
| `max_size` | no | - | max primary shard index storage size.
See <<byte-units, Byte Units>>
for formatting
| `max_docs` | no | - | max number of documents an
7 changes: 4 additions & 3 deletions docs/reference/ilm/set-up-lifecycle-policy.asciidoc
@@ -107,7 +107,8 @@ PUT test-index
-----------------------
// CONSOLE

-IMPORTANT: Its recommended not to use the create index API with a policy that
+IMPORTANT: Do not use the create index API with a policy that
defines a rollover action. If you do so, the new index as the result of the
-rollover will not carry forward the policy. Always use index templates to
-define policies with rollover actions.
+rollover will not carry forward the policy. Always use
+<<applying-policy-to-template, index templates>> to define policies with rollover
+actions.
4 changes: 2 additions & 2 deletions docs/reference/ilm/using-policies-rollover.asciidoc
@@ -31,8 +31,8 @@ The rollover action takes the following parameters:
.`rollover` Action Parameters
|===
|Name |Description
-|max_size |The maximum estimated size the index is allowed to grow
-to. Defaults to `null`. Optional.
+|max_size |The maximum estimated size the primary shard of the index is allowed
+to grow to. Defaults to `null`. Optional.
|max_docs |The maximum number of documents the index should
contain. Defaults to `null`. Optional.
|max_age |The maximum age of the index. Defaults to `null`. Optional.
9 changes: 9 additions & 0 deletions docs/reference/indices/rollover-index.asciidoc
@@ -18,6 +18,15 @@ from the original (rolled-over) index.
In this scenario, the write index will have its rollover alias' `is_write_index` set to `false`, while the newly created index
will now have the rollover alias pointing to it as the write index with `is_write_index` as `true`.

+The available conditions are:
+
+.`conditions` parameters
+|===
+| Name | Description
+| max_age | The maximum age of the index
+| max_docs | The maximum number of documents the index should contain. Documents in replicas are not counted multiple times
+| max_size | The maximum estimated size of the primary shard of the index
+|===

[source,js]
--------------------------------------------------
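For orientation, the three conditions in the new table correspond to builder methods on `RolloverRequest` in the Java high-level REST client of this era. A hedged sketch — the alias name, thresholds, and client wiring are illustrative assumptions, not taken from the docs above:

[source,java]
----
import java.io.IOException;

import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

// Sketch: roll the "logs-write" alias over when any of the three conditions
// is met; passing null lets Elasticsearch derive the new index name.
public class RolloverExample {
    static RolloverResponse rollover(RestHighLevelClient client) throws IOException {
        RolloverRequest request = new RolloverRequest("logs-write", null);
        request.addMaxIndexAgeCondition(TimeValue.timeValueDays(7));             // max_age
        request.addMaxIndexDocsCondition(1_000_000L);                            // max_docs
        request.addMaxIndexSizeCondition(new ByteSizeValue(5, ByteSizeUnit.GB)); // max_size
        return client.indices().rollover(request, RequestOptions.DEFAULT);
    }
}
----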
90 changes: 63 additions & 27 deletions docs/reference/modules/remote-clusters.asciidoc
@@ -2,64 +2,64 @@
== Remote clusters

ifndef::include-xpack[]
-The _remote clusters_ module allows establishing uni-directional connections to
-a remote cluster. This functionality is used in
+The _remote clusters_ module enables you to establish uni-directional
+connections to a remote cluster. This functionality is used in
<<modules-cross-cluster-search,cross-cluster search>>.
endif::[]
ifdef::include-xpack[]
-The _remote clusters_ module allows establishing uni-directional connections to
-a remote cluster. This functionality is used in cross-cluster replication, and
+The _remote clusters_ module enables you to establish uni-directional
+connections to a remote cluster. This functionality is used in
+{stack-ov}/xpack-ccr.html[cross-cluster replication] and
<<modules-cross-cluster-search,cross-cluster search>>.
endif::[]

Remote cluster connections work by configuring a remote cluster and connecting
only to a limited number of nodes in the remote cluster. Each remote cluster is
referenced by a name and a list of seed nodes. When a remote cluster is
registered, its cluster state is retrieved from one of the seed nodes so that by
default up to three _gateway nodes_ are selected to be connected to as part of
remote cluster requests. Remote cluster connections consist of uni-directional
connections from the coordinating node to the previously selected remote nodes
-only. It is possible to tag which nodes should be selected through node
-attributes (see <<remote-cluster-settings>>).
+only. You can tag which nodes should be selected by using node attributes
+(see <<remote-cluster-settings>>).

Each node in a cluster that has remote clusters configured connects to one or
more _gateway nodes_ and uses them to federate requests to the remote cluster.

[float]
[[configuring-remote-clusters]]
-=== Configuring Remote Clusters
+=== Configuring remote clusters

-Remote clusters can be specified globally using
-<<cluster-update-settings,cluster settings>> (which can be updated dynamically),
-or local to individual nodes using the `elasticsearch.yml` file.
+You can configure remote clusters globally by using
+<<cluster-update-settings,cluster settings>>, which you can update dynamically.
+Alternatively, you can configure them locally on individual nodes by using the
+`elasticsearch.yml` file.

-If a remote cluster is configured via `elasticsearch.yml` only the nodes with
-that configuration will be able to connect to the remote cluster. In other
-words, functionality that relies on remote cluster requests will have to be
-driven specifically from those nodes. Remote clusters set via the
-<<cluster-update-settings,cluster settings API>> will be available on every node
-in the cluster.
-
-The `elasticsearch.yml` config file for a node that connects to remote clusters
-needs to list the remote clusters that should be connected to, for instance:
+If you specify the settings in `elasticsearch.yml` files, only the nodes with
+those settings can connect to the remote cluster. In other words, functionality
+that relies on remote cluster requests must be driven specifically from those
+nodes. For example:

[source,yaml]
--------------------------------
cluster:
    remote:
        cluster_one: <1>
            seeds: 127.0.0.1:9300
+           transport.ping_schedule: 30s <2>
-       cluster_two: <1>
+       cluster_two:
            seeds: 127.0.0.1:9301
+           transport.compress: true <3>
--------------------------------
<1> `cluster_one` and `cluster_two` are arbitrary _cluster aliases_ representing
the connection to each cluster. These names are subsequently used to distinguish
between local and remote indices.
+<2> A keep-alive ping is configured for `cluster_one`.
+<3> Compression is explicitly enabled for requests to `cluster_two`.

+For more information about the optional transport settings, see
+<<modules-transport>>.
+
-The equivalent example using the <<cluster-update-settings,cluster settings
-API>> to add remote clusters to all nodes in the cluster would look like the
-following:
+If you use <<cluster-update-settings,cluster settings>>, the remote clusters are
+available on every node in the cluster. For example:

[source,js]
--------------------------------
@@ -71,12 +71,14 @@ PUT _cluster/settings
"cluster_one": {
"seeds": [
"127.0.0.1:9300"
]
],
"transport.ping_schedule": "30s"
},
"cluster_two": {
"seeds": [
"127.0.0.1:9301"
]
],
"transport.compress": true
},
"cluster_three": {
"seeds": [
@@ -92,6 +94,40 @@ PUT _cluster/settings
// TEST[setup:host]
// TEST[s/127.0.0.1:9300/\${transport_host}/]

+You can dynamically update the compression and ping schedule settings. However,
+you must re-include seeds in the settings update request. For example:
+
+[source,js]
+--------------------------------
+PUT _cluster/settings
+{
+  "persistent": {
+    "cluster": {
+      "remote": {
+        "cluster_one": {
+          "seeds": [
+            "127.0.0.1:9300"
+          ],
+          "transport.ping_schedule": "60s"
+        },
+        "cluster_two": {
+          "seeds": [
+            "127.0.0.1:9301"
+          ],
+          "transport.compress": false
+        }
+      }
+    }
+  }
+}
+--------------------------------
+// CONSOLE
+// TEST[continued]
+
+NOTE: When the compression or ping schedule settings change, all the existing
+node connections must close and re-open, which can cause in-flight requests to
+fail.
+
A remote cluster can be deleted from the cluster settings by setting its seeds
to `null`:

@@ -173,6 +209,6 @@ PUT _cluster/settings
[[retrieve-remote-clusters-info]]
=== Retrieving remote clusters info

-The <<cluster-remote-info, Remote Cluster Info API>> allows to retrieve
+You can use the <<cluster-remote-info, remote cluster info API>> to retrieve
information about the configured remote clusters, as well as the remote nodes
that the node is connected to.
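The `PUT _cluster/settings` examples above can also be issued programmatically. A sketch using the high-level REST client of this era — the alias `cluster_one` and the seed address are placeholders, and the settings keys mirror the JSON examples:

[source,java]
----
import java.io.IOException;

import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.Settings;

// Sketch: register a remote cluster, with its transport options, as persistent
// cluster settings. Remember to re-include the seeds on every update.
public class RemoteClusterSettingsExample {
    static ClusterUpdateSettingsResponse register(RestHighLevelClient client) throws IOException {
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
        request.persistentSettings(Settings.builder()
            .putList("cluster.remote.cluster_one.seeds", "127.0.0.1:9300")
            .put("cluster.remote.cluster_one.transport.ping_schedule", "30s")
            .build());
        return client.cluster().putSettings(request, RequestOptions.DEFAULT);
    }
}
----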
14 changes: 14 additions & 0 deletions libs/ssl-config/build.gradle
@@ -34,9 +34,23 @@ dependencies {
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
}

if (isEclipse) {
// in eclipse the project is under a fake root, we need to change around the source sets
sourceSets {
if (project.path == ":libs:ssl-config") {
main.java.srcDirs = ['java']
main.resources.srcDirs = ['resources']
} else {
test.java.srcDirs = ['java']
test.resources.srcDirs = ['resources']
}
}
}

forbiddenApisMain {
replaceSignatureFiles 'jdk-signatures'
}

forbiddenPatterns {
exclude '**/*.key'
exclude '**/*.pem'
@@ -1,2 +1,2 @@
-// this is just shell gradle file for eclipse to have separate projects for geo src and tests
+// this is just shell gradle file for eclipse to have separate projects for ssl-config src and tests
apply from: '../../build.gradle'
@@ -1,5 +1,5 @@
-// this is just shell gradle file for eclipse to have separate projects for geo src and tests
+// this is just shell gradle file for eclipse to have separate projects for ssl-config src and tests
apply from: '../../build.gradle'
dependencies {
  testCompile project(':libs:elasticsearch-ssl-config')
}
@@ -126,7 +126,6 @@ public void testInvalidJavaPattern() {
}

public void testJavaPatternLocale() {
-    // @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/31724")
    assumeFalse("Can't run in a FIPS JVM, Joda parse date error", inFipsJvm());
    DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10),
        templatize(ZoneId.of("Europe/Amsterdam")), templatize(Locale.ITALIAN),
@@ -138,6 +137,18 @@ public void testJavaPatternLocale() {
    assertThat(ingestDocument.getFieldValue("date_as_date", String.class), equalTo("2010-06-12T00:00:00.000+02:00"));
}

+public void testJavaPatternEnglishLocale() {
+    // Since testJavaPatternLocale is muted in FIPS mode, test that we can correctly parse dates in english
+    DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10),
+        templatize(ZoneId.of("Europe/Amsterdam")), templatize(Locale.ENGLISH),
+        "date_as_string", Collections.singletonList("yyyy dd MMMM"), "date_as_date");
+    Map<String, Object> document = new HashMap<>();
+    document.put("date_as_string", "2010 12 June");
+    IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
+    dateProcessor.execute(ingestDocument);
+    assertThat(ingestDocument.getFieldValue("date_as_date", String.class), equalTo("2010-06-12T00:00:00.000+02:00"));
+}
+
public void testJavaPatternDefaultYear() {
    String format = randomFrom("dd/MM", "8dd/MM");
    DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10),
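The new English-locale test can be reproduced with plain `java.time`, independent of the ingest framework — a small sketch assuming only the JDK:

[source,java]
----
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

// Sketch: the same pattern and input as testJavaPatternEnglishLocale.
// "2010 12 June" under "yyyy dd MMMM" in the English locale is June 12, 2010.
public class EnglishLocaleParse {
    public static void main(String[] args) {
        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy dd MMMM", Locale.ENGLISH);
        System.out.println(LocalDate.parse("2010 12 June", formatter)); // 2010-06-12
    }
}
----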
@@ -20,6 +20,7 @@
package org.elasticsearch.index.reindex;

import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup;
@@ -37,6 +38,7 @@
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.both;
import static org.hamcrest.Matchers.empty;
+import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.hasSize;
@@ -191,13 +193,15 @@ private ListTasksResponse rethrottleTask(TaskId taskToRethrottle, float newRequ
    assertThat(rethrottleResponse.getTasks(), hasSize(1));
    response.set(rethrottleResponse);
} catch (ElasticsearchException e) {
-   if (e.getCause() instanceof IllegalArgumentException) {
-       // We want to retry in this case so we throw an assertion error
-       logger.info("caught unprepared task, retrying until prepared");
-       throw new AssertionError("Rethrottle request for task [" + taskToRethrottle.getId() + "] failed", e);
-   } else {
+   Throwable unwrapped = ExceptionsHelper.unwrap(e, IllegalArgumentException.class);
+   if (unwrapped == null) {
        throw e;
    }
+   // We want to retry in this case so we throw an assertion error
+   assertThat(unwrapped.getMessage(), equalTo("task [" + taskToRethrottle.getId()
+       + "] has not yet been initialized to the point where it knows how to rethrottle itself"));
+   logger.info("caught unprepared task, retrying until prepared");
+   throw new AssertionError("Rethrottle request for task [" + taskToRethrottle.getId() + "] failed", e);
}
});

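The rewritten catch block unwraps the nested `IllegalArgumentException`, asserts on its message, and rethrows as `AssertionError` so that the enclosing `assertBusy` loop retries. A simplified, self-contained sketch of that retry idiom — the `assertBusy` below is a hand-rolled stand-in for the Elasticsearch test helper, not the real implementation:

[source,java]
----
import java.util.concurrent.TimeUnit;

// Sketch: assertBusy re-runs the body until it stops throwing AssertionError
// or the timeout expires; any other exception type fails immediately.
public class RetryIdiom {
    static void assertBusy(Runnable body, long timeout, TimeUnit unit) throws InterruptedException {
        long deadline = System.nanoTime() + unit.toNanos(timeout);
        while (true) {
            try {
                body.run();
                return;
            } catch (AssertionError e) {
                if (System.nanoTime() > deadline) {
                    throw e; // give up once the deadline has passed
                }
                Thread.sleep(100); // back off, then retry
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        assertBusy(() -> {
            // Transient failures surface as AssertionError so the loop retries.
            if (System.nanoTime() - start < TimeUnit.MILLISECONDS.toNanos(300)) {
                throw new AssertionError("not ready yet, retrying");
            }
        }, 10, TimeUnit.SECONDS);
        System.out.println("succeeded after retries");
    }
}
----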
@@ -104,10 +104,8 @@
"Implicitly create a typeless index while there is a typed template":

- skip:
#version: " - 6.99.99"
#reason: needs typeless index operations to work on typed indices
version: "all"
reason: "muted, waiting for #38711"
version: " - 6.99.99"
reason: needs typeless index operations to work on typed indices

- do:
indices.put_template:
Expand Down