Merge branch 'master' into feature/seq_no
* master: (158 commits)
  Document the hack
  Refactor property placeholder use of env. vars
  Force java9 log4j hack in testing
  Fix log4j buggy java version detection
  Make java9 work again
  Don't mkdir directly in deb init script
  Fix env. var placeholder test so it's reproducible
  Remove ScriptMode class in favor of boolean true/false
  [rest api spec] fix doc urls
  Netty request/response tracer should wait for send
  Filter client/server VM options from jvm.options
  [rest api spec] fix url for reindex api docs
  Remove use of a Fields class in snapshot responses that contains x-content keys, in favor of declaring/using the keys directly.
  Limit retries of failed allocations per index (#18467)
  Proxy box method to use valueOf.
  Use the build-in valueOf method instead of the custom one.
  Fixed tests and added a comment to the box method.
  Fix boxing.
  Do not decode path when sending error
  Fix race condition in snapshot initialization
  ...
jasontedor committed May 22, 2016
2 parents f6694be + cec4b9a commit ad7229f
Showing 825 changed files with 25,923 additions and 15,550 deletions.
11 changes: 11 additions & 0 deletions CONTRIBUTING.md
@@ -71,6 +71,17 @@ Once your changes and tests are ready to submit for review:

Then sit back and wait. There will probably be discussion about the pull request and, if any changes are needed, we would love to work with you to get your pull request merged into Elasticsearch.

Please adhere to the general guideline that you should never force push
to a publicly shared branch. Once you have opened your pull request, you
should consider your branch publicly shared. Instead of force pushing
you can just add incremental commits; this is generally easier on your
reviewers. If you need to pick up changes from master, you can merge
master into your branch. A reviewer might ask you to rebase a
long-running pull request in which case force pushing is okay for that
request. Note that squashing at the end of the review process should
also not be done; that can be done when the pull request is [integrated
via GitHub](https://github.com/blog/2141-squash-your-commits).

Contributing to the Elasticsearch codebase
------------------------------------------

2 changes: 1 addition & 1 deletion TESTING.asciidoc
@@ -201,7 +201,7 @@ gradle test -Dtests.timeoutSuite=5000! ...
Change the logging level of ES (not gradle)

--------------------------------
gradle test -Des.logger.level=DEBUG
gradle test -Dtests.logger.level=DEBUG
--------------------------------

Print all the logging output from the test runs to the commandline

@@ -378,7 +378,7 @@ class BuildPlugin implements Plugin<Project> {
* -serial because we don't use java serialization.
*/
// don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial' << '-Xdoclint:all' << '-Xdoclint:-missing'
options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options' << '-Xdoclint:all' << '-Xdoclint:-missing'
// compile with compact 3 profile by default
// NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
if (project.compactProfile != 'full') {
@@ -456,7 +456,7 @@ class BuildPlugin implements Plugin<Project> {
// default test sysprop values
systemProperty 'tests.ifNoTests', 'fail'
// TODO: remove setting logging level via system property
systemProperty 'es.logger.level', 'WARN'
systemProperty 'tests.logger.level', 'WARN'
for (Map.Entry<String, String> property : System.properties.entrySet()) {
if (property.getKey().startsWith('tests.') ||
property.getKey().startsWith('es.')) {

@@ -144,6 +144,9 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
if (query != null) {
for (String param: query.tokenize('&')) {
def (String name, String value) = param.tokenize('=')
if (value == null) {
value = ''
}
current.println(" $name: \"$value\"")
}
}
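The guard added above handles query parameters that carry no value, such as ?pretty or ?refresh: Groovy's tokenize('=') then yields a single token, the destructured value comes back null, and the generated REST test would otherwise emit the literal string "null". A rough standalone Java sketch of the same normalization, purely for illustration (the helper below is made up, not the task's actual code):

--------------------------------
import java.util.LinkedHashMap;
import java.util.Map;

class QueryParamSketch {
    /** Splits "a=1&pretty&b=2" into {a=1, pretty=, b=2}, defaulting a missing value to "". */
    static Map<String, String> parse(String query) {
        Map<String, String> params = new LinkedHashMap<>();
        for (String param : query.split("&")) {
            int eq = param.indexOf('=');
            String name = eq < 0 ? param : param.substring(0, eq);
            String value = eq < 0 ? "" : param.substring(eq + 1);
            params.put(name, value);
        }
        return params;
    }
}
--------------------------------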

@@ -129,7 +129,11 @@ class NodeInfo {
}

env = [ 'JAVA_HOME' : project.javaHome ]
args.addAll("-E", "es.node.portsfile=true")
args.addAll("-E", "node.portsfile=true")
String loggerLevel = System.getProperty("tests.logger.level")
if (loggerLevel != null) {
args.addAll("-E", "logger.level=${loggerLevel}")
}
String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")
String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs
env.put('ES_JAVA_OPTS', esJavaOpts)
@@ -140,7 +144,7 @@ class NodeInfo {
}
}
env.put('ES_JVM_OPTIONS', new File(confDir, 'jvm.options'))
args.addAll("-E", "es.path.conf=${confDir}")
args.addAll("-E", "path.conf=${confDir}")
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
args.add('"') // end the entire command, quoted
}
2 changes: 0 additions & 2 deletions buildSrc/src/main/resources/checkstyle_suppressions.xml
@@ -517,7 +517,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]ConvertProcessor.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]GcNames.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]HotThreads.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmGcMonitorService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmStats.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]Node.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]internal[/\\]InternalSettingsPreparer.java" checks="LineLength" />
@@ -1336,7 +1335,6 @@
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]junit[/\\]listeners[/\\]LoggingListener.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]ESRestTestCase.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]RestTestExecutionContext.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]client[/\\]RestClient.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]client[/\\]http[/\\]HttpRequestBuilder.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]json[/\\]JsonPath.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]GreaterThanEqualToParser.java" checks="LineLength" />
2 changes: 1 addition & 1 deletion buildSrc/src/main/resources/deb/postinst.ftl
@@ -1,2 +1,2 @@
#!/bin/sh -e
#!/bin/bash -e
<% commands.each {command -> %><%= command %><% } %>
2 changes: 1 addition & 1 deletion buildSrc/src/main/resources/deb/preinst.ftl
@@ -1,2 +1,2 @@
#!/bin/sh -e
#!/bin/bash -e
<% commands.each {command -> %><%= command %><% } %>
6 changes: 2 additions & 4 deletions buildSrc/version.properties
@@ -13,9 +13,7 @@ jna = 4.1.0
# test dependencies
randomizedrunner = 2.3.2
junit = 4.11
# TODO: Upgrade httpclient to a version > 4.5.1 once released. Then remove o.e.test.rest.client.StrictHostnameVerifier* and use
# DefaultHostnameVerifier instead since we no longer need to workaround https://issues.apache.org/jira/browse/HTTPCLIENT-1698
httpclient = 4.3.6
httpcore = 4.3.3
httpclient = 4.5.2
httpcore = 4.4.4
commonslogging = 1.1.3
commonscodec = 1.10
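The TODO removed above noted that the custom o.e.test.rest.client.StrictHostnameVerifier existed only to work around HTTPCLIENT-1698 and could be dropped in favor of DefaultHostnameVerifier once httpclient moved past 4.5.1. A minimal sketch of what that swap looks like on the client-builder side; illustrative only, not code from this commit:

--------------------------------
import org.apache.http.conn.ssl.DefaultHostnameVerifier;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

public class HostnameVerifierSketch {
    public static CloseableHttpClient build() {
        // With httpclient 4.5.2 the stock verifier behaves correctly, so no
        // custom StrictHostnameVerifier wrapper is needed any more.
        return HttpClients.custom()
                .setSSLHostnameVerifier(new DefaultHostnameVerifier())
                .build();
    }
}
--------------------------------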
37 changes: 37 additions & 0 deletions core/src/main/java/org/apache/log4j/Java9Hack.java
@@ -0,0 +1,37 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.log4j;

import org.apache.log4j.helpers.ThreadLocalMap;

/**
* Log4j 1.2 MDC breaks because it parses java.version incorrectly (does not handle new java9 versioning).
*
* This hack fixes up the pkg private members as if it had detected the java version correctly.
*/
public class Java9Hack {

public static void fixLog4j() {
if (MDC.mdc.tlm == null) {
MDC.mdc.java1 = false;
MDC.mdc.tlm = new ThreadLocalMap();
}
}
}
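The class lives in the org.apache.log4j package so it can reach the package-private MDC fields; callers only need to invoke the static method before anything touches the MDC. A minimal sketch of how a test bootstrap might wire it in (the bootstrap class below is hypothetical; only Java9Hack.fixLog4j() comes from this commit):

--------------------------------
import org.apache.log4j.Java9Hack;

public final class LoggingBootstrapSketch {
    static {
        // Harmless on JDK 8 and earlier: fixLog4j() only patches the MDC fields
        // when log4j's java.version parsing left them uninitialized (the Java 9 case).
        Java9Hack.fixLog4j();
    }

    private LoggingBootstrapSketch() {}
}
--------------------------------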
4 changes: 4 additions & 0 deletions core/src/main/java/org/elasticsearch/Version.java
@@ -69,6 +69,8 @@ public class Version {
public static final Version V_2_3_1 = new Version(V_2_3_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_2_3_2_ID = 2030299;
public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_2_3_3_ID = 2030399;
public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_5_0_0_alpha1_ID = 5000001;
public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
public static final int V_5_0_0_alpha2_ID = 5000002;
@@ -94,6 +96,8 @@ public static Version fromId(int id) {
return V_5_0_0_alpha2;
case V_5_0_0_alpha1_ID:
return V_5_0_0_alpha1;
case V_2_3_3_ID:
return V_2_3_3;
case V_2_3_2_ID:
return V_2_3_2;
case V_2_3_1_ID:
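For reference, the new id follows the existing two-digits-per-component scheme visible above: 2030399 encodes 2.3.3 with the trailing 99 used for final releases, just as 2030299 encodes 2.3.2 and 5000001 encodes 5.0.0-alpha1, so the constant round-trips through fromId. A small check, assuming the core Version class is on the classpath:

--------------------------------
import org.elasticsearch.Version;

public class VersionRoundTripSketch {
    public static void main(String[] args) {
        Version v = Version.fromId(Version.V_2_3_3_ID);   // 2030399
        assert v == Version.V_2_3_3;                       // fromId returns the constant itself
        System.out.println(v);
    }
}
--------------------------------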

@@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.allocation;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
@@ -30,24 +29,19 @@
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaData.Custom;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingNodes.RoutingNodesIterator;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
@@ -60,7 +54,6 @@
import org.elasticsearch.transport.TransportService;

import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -72,7 +65,6 @@
public class TransportClusterAllocationExplainAction
extends TransportMasterNodeAction<ClusterAllocationExplainRequest, ClusterAllocationExplainResponse> {

private final AllocationService allocationService;
private final ClusterInfoService clusterInfoService;
private final AllocationDeciders allocationDeciders;
private final ShardsAllocator shardAllocator;
@@ -82,12 +74,10 @@ public class TransportClusterAllocationExplainAction
public TransportClusterAllocationExplainAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
AllocationService allocationService, ClusterInfoService clusterInfoService,
AllocationDeciders allocationDeciders, ShardsAllocator shardAllocator,
TransportIndicesShardStoresAction shardStoresAction) {
ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders,
ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction) {
super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, ClusterAllocationExplainRequest::new);
this.allocationService = allocationService;
this.clusterInfoService = clusterInfoService;
this.allocationDeciders = allocationDeciders;
this.shardAllocator = shardAllocator;
@@ -259,8 +249,8 @@ public static ClusterAllocationExplanation explainShard(ShardRouting shard, Rout
protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
final ActionListener<ClusterAllocationExplainResponse> listener) {
final RoutingNodes routingNodes = state.getRoutingNodes();
final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state.nodes(),
clusterInfoService.getClusterInfo(), System.nanoTime());
final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
clusterInfoService.getClusterInfo(), System.nanoTime(), false);

ShardRouting foundShard = null;
if (request.useAnyUnassignedShard()) {

@@ -38,9 +38,10 @@
* Request to submit cluster reroute allocation commands
*/
public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteRequest> {
AllocationCommands commands = new AllocationCommands();
boolean dryRun;
boolean explain;
private AllocationCommands commands = new AllocationCommands();
private boolean dryRun;
private boolean explain;
private boolean retryFailed;

public ClusterRerouteRequest() {
}
@@ -81,13 +82,30 @@ public ClusterRerouteRequest explain(boolean explain) {
return this;
}

/**
* Sets the retry failed flag (defaults to <tt>false</tt>). If true, the
* request will retry allocating shards that can't currently be allocated due to too many allocation failures.
*/
public ClusterRerouteRequest setRetryFailed(boolean retryFailed) {
this.retryFailed = retryFailed;
return this;
}

/**
* Returns the current explain flag
*/
public boolean explain() {
return this.explain;
}

/**
* Returns the current retry failed flag
*/
public boolean isRetryFailed() {
return this.retryFailed;
}


/**
* Set the allocation commands to execute.
*/
@@ -96,6 +114,13 @@ public ClusterRerouteRequest commands(AllocationCommand... commands) {
return this;
}

/**
* Returns the allocation commands to execute
*/
public AllocationCommands getCommands() {
return commands;
}

/**
* Sets the source for the request.
*/
@@ -136,6 +161,7 @@ public void readFrom(StreamInput in) throws IOException {
commands = AllocationCommands.readFrom(in);
dryRun = in.readBoolean();
explain = in.readBoolean();
retryFailed = in.readBoolean();
readTimeout(in);
}

@@ -145,6 +171,7 @@ public void writeTo(StreamOutput out) throws IOException {
AllocationCommands.writeTo(commands, out);
out.writeBoolean(dryRun);
out.writeBoolean(explain);
out.writeBoolean(retryFailed);
writeTimeout(out);
}
}
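With the new readFrom/writeTo entries the flag also travels over the transport layer, and the companion ClusterRerouteRequestBuilder change below exposes it fluently. A hedged usage sketch, assuming the usual client.admin().cluster() entry point (client construction not shown, and this snippet is not part of the commit):

--------------------------------
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.client.Client;

public class RetryFailedSketch {
    // Ask the cluster to retry shards that hit the per-index allocation retry limit,
    // without submitting any explicit allocation commands.
    static ClusterRerouteResponse retryFailedAllocations(Client client) {
        return client.admin().cluster()
                .prepareReroute()
                .setRetryFailed(true)
                .get();
    }
}
--------------------------------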

@@ -60,6 +60,15 @@ public ClusterRerouteRequestBuilder setExplain(boolean explain) {
return this;
}

/**
* Sets the retry failed flag (defaults to <tt>false</tt>). If true, the
* request will retry allocating shards that can't currently be allocated due to too many allocation failures.
*/
public ClusterRerouteRequestBuilder setRetryFailed(boolean retryFailed) {
request.setRetryFailed(retryFailed);
return this;
}

/**
* Sets the commands for the request to execute.
*/