Reduce global checkpoint sync interval in disruption tests
This should remove the flakiness of the cluster disruption integration tests.
See elastic/elasticsearch#38931 and
elastic/elasticsearch@d49d9b5
seut committed Aug 29, 2019
1 parent 77f45c2 commit d88092e
Showing 5 changed files with 17 additions and 4 deletions.
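
For context: the sync interval is controlled by IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING ("index.global_checkpoint_sync.interval"), which defaults to 30 seconds. A sketch of the definition, based on the upstream Elasticsearch source (the exact property flags here are an assumption):

import java.util.concurrent.TimeUnit;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.TimeValue;

// Sketch of the setting as defined in org.elasticsearch.index.IndexService.
// The 30s default is what surfaces as 30000 (milliseconds) in the
// SHOW CREATE TABLE test expectations further down in this diff.
public static final Setting<TimeValue> GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING =
    Setting.timeSetting(
        "index.global_checkpoint_sync.interval",
        new TimeValue(30, TimeUnit.SECONDS),     // default interval
        new TimeValue(0, TimeUnit.MILLISECONDS), // minimum allowed value
        Setting.Property.Dynamic,                // adjustable at runtime
        Setting.Property.IndexScope);            // per-index (per-table) setting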
File 1 (path not shown):
@@ -330,7 +330,7 @@ private StartRecoveryRequest getStartRecoveryRequest(final RecoveryTarget recoveryTarget) {
             logger.trace("{} preparing for file-based recovery from [{}]", recoveryTarget.shardId(), recoveryTarget.sourceNode());
         } else {
             logger.trace(
-                "{} preparing for sequence-number-based recovery starting at local checkpoint [{}] from [{}]",
+                "{} preparing for sequence-number-based recovery starting at sequence number [{}] from [{}]",
                 recoveryTarget.shardId(),
                 startingSeqNo,
                 recoveryTarget.sourceNode());
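
(The log-message change above is a wording fix: the value logged is startingSeqNo, the sequence number the recovery will start from. It is derived from the local checkpoint, typically local checkpoint + 1, but is not the local checkpoint itself.)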
File 2: sql/src/main/java/io/crate/analyze/TableParameters.java (6 changes: 5 additions & 1 deletion)
@@ -36,6 +36,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.engine.EngineConfig;
 import org.elasticsearch.index.mapper.MapperService;
@@ -96,7 +97,10 @@ public class TableParameters {
         IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
         IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING,
         IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING,
-        EngineConfig.INDEX_CODEC_SETTING
+        EngineConfig.INDEX_CODEC_SETTING,
+
+        // do NOT expose this setting, it is only needed for tests, see ClusterDisruptionIT.
+        IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING
     );

     /**
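
Listing the setting in TableParameters makes it accepted in a CREATE TABLE ... WITH (...) clause without documenting it as a public table setting. A side effect is that SHOW CREATE TABLE now prints it too, which is why every test expectation below gains a "global_checkpoint_sync.interval" = 30000 line (the 30-second default, rendered in milliseconds).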
File 3 (path not shown):
@@ -93,6 +93,7 @@ public void testBuildCreateTableColumns() throws Exception {
     " \"blocks.write\" = false,\n" +
     " codec = 'default',\n" +
     " column_policy = 'strict',\n" +
+    " \"global_checkpoint_sync.interval\" = 30000,\n" +
     " \"mapping.total_fields.limit\" = 1000,\n" +
     " max_ngram_diff = 1,\n" +
     " max_shingle_diff = 3,\n" +
@@ -139,6 +140,7 @@ public void testBuildCreateTablePrimaryKey() throws Exception {
     " \"blocks.write\" = false,\n" +
     " codec = 'default',\n" +
     " column_policy = 'strict',\n" +
+    " \"global_checkpoint_sync.interval\" = 30000,\n" +
     " \"mapping.total_fields.limit\" = 1000,\n" +
     " max_ngram_diff = 1,\n" +
     " max_shingle_diff = 3,\n" +
@@ -187,6 +189,7 @@ public void testBuildCreateTableNotNull() throws Exception {
     " \"blocks.write\" = false,\n" +
     " codec = 'default',\n" +
     " column_policy = 'strict',\n" +
+    " \"global_checkpoint_sync.interval\" = 30000,\n" +
     " \"mapping.total_fields.limit\" = 1000,\n" +
     " max_ngram_diff = 1,\n" +
     " max_shingle_diff = 3,\n" +
@@ -235,6 +238,7 @@ public void testBuildCreateTableClusteredByPartitionedBy() throws Exception {
     " \"blocks.write\" = false,\n" +
     " codec = 'default',\n" +
     " column_policy = 'strict',\n" +
+    " \"global_checkpoint_sync.interval\" = 30000,\n" +
     " \"mapping.total_fields.limit\" = 1000,\n" +
     " max_ngram_diff = 1,\n" +
     " max_shingle_diff = 3,\n" +
@@ -308,6 +312,7 @@ public void testBuildCreateTableIndexes() throws Exception {
     " \"blocks.write\" = false,\n" +
     " codec = 'default',\n" +
     " column_policy = 'strict',\n" +
+    " \"global_checkpoint_sync.interval\" = 30000,\n" +
     " \"mapping.total_fields.limit\" = 1000,\n" +
     " max_ngram_diff = 1,\n" +
     " max_shingle_diff = 3,\n" +
@@ -352,6 +357,7 @@ public void testBuildCreateTableStorageDefinitions() throws Exception {
     " \"blocks.write\" = false,\n" +
     " codec = 'default',\n" +
     " column_policy = 'strict',\n" +
+    " \"global_checkpoint_sync.interval\" = 30000,\n" +
     " \"mapping.total_fields.limit\" = 1000,\n" +
     " max_ngram_diff = 1,\n" +
     " max_shingle_diff = 3,\n" +
@@ -395,6 +401,7 @@ public void testBuildCreateTableColumnDefaultClause() throws Exception {
     " \"blocks.write\" = false,\n" +
     " codec = 'default',\n" +
     " column_policy = 'strict',\n" +
+    " \"global_checkpoint_sync.interval\" = 30000,\n" +
     " \"mapping.total_fields.limit\" = 1000,\n" +
     " max_ngram_diff = 1,\n" +
     " max_shingle_diff = 3,\n" +
File 4 (path not shown):
@@ -100,6 +100,7 @@
 import org.elasticsearch.plugin.repository.url.URLRepositoryPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.hamcrest.Matcher;
 import org.hamcrest.Matchers;
@@ -189,7 +190,8 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
         CrateCommonPlugin.class,
         HttpTransportPlugin.class,
         CommonAnalysisPlugin.class,
-        URLRepositoryPlugin.class
+        URLRepositoryPlugin.class,
+        InternalSettingsPlugin.class
     );
 }

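InternalSettingsPlugin comes from the Elasticsearch test framework; registering it in nodePlugins() is what allows the integration tests to set otherwise-internal index settings. A minimal sketch of a plugin in that spirit (TestOnlySettingsPlugin is a hypothetical name; the set of settings the real InternalSettingsPlugin registers may differ):

import java.util.List;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.plugins.Plugin;

// Hypothetical test-only plugin: registers an internal index setting so
// integration tests may set it, e.g. via a table's WITH clause.
public class TestOnlySettingsPlugin extends Plugin {

    @Override
    public List<Setting<?>> getSettings() {
        return List.of(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING);
    }
}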
File 5 (path not shown):
@@ -97,7 +97,7 @@ public void testAckedIndexing() throws Exception {

     logger.info("creating table t clustered into {} shards with {} replicas", numberOfShards, replicas);
     execute("create table t (id int primary key, x string) clustered into " + numberOfShards + " shards " +
-        "with (number_of_replicas = " + replicas + ", \"write.wait_for_active_shards\" = 1)");
+        "with (number_of_replicas = " + replicas + ", \"write.wait_for_active_shards\" = 1, \"global_checkpoint_sync.interval\"='1s')");
     ensureGreen();

     ServiceDisruptionScheme disruptionScheme = addRandomDisruptionScheme();
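
This last hunk is presumably the ClusterDisruptionIT referenced by the TableParameters comment above. Dropping the interval from the 30-second default to 1s means replicas observe the advanced global checkpoint almost immediately after a disruption heals, so the test's post-disruption assertions no longer race against the next scheduled sync; this appears to mirror what the referenced elastic/elasticsearch@d49d9b5 commit does for Elasticsearch's own disruption tests.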
