Skip to content

Commit

Permalink
go/e2e/txsource: add txsource SGX scenario that uses less nodes
Browse files Browse the repository at this point in the history
  • Loading branch information
ptrus committed Jan 4, 2021
1 parent 0a570de commit c05d7be
Show file tree
Hide file tree
Showing 4 changed files with 64 additions and 19 deletions.
2 changes: 1 addition & 1 deletion .buildkite/code.pipeline.yml
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,7 @@ steps:
- /tmp/e2e/**/*.log
env:
OASIS_E2E_COVERAGE: enable
OASIS_EXCLUDE_E2E: e2e/runtime/txsource-multi
OASIS_EXCLUDE_E2E: e2e/runtime/txsource-multi,e2e/runtime/txsource-multi-short
TEST_BASE_DIR: /tmp
# libp2p logging.
IPFS_LOGGING: debug
Expand Down
1 change: 1 addition & 0 deletions .changelog/3595.internal.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
go/e2e/txsource: add txsource SGX scenario using fewer nodes
4 changes: 4 additions & 0 deletions go/oasis-test-runner/scenario/e2e/runtime/runtime.go
Original file line number Diff line number Diff line change
Expand Up @@ -582,6 +582,10 @@ func RegisterScenarios() error {
for _, s := range []scenario.Scenario{
// Transaction source test. Non-default, because it runs for ~6 hours.
TxSourceMulti,
// SGX version of the txsource-multi-short test. Non-default, because
// it is identical to the txsource-multi-short, only using fewer nodes
// due to SGX CI instance resource constraints.
TxSourceMultiShortSGX,
} {
if err := cmd.RegisterNondefault(s); err != nil {
return err
Expand Down
76 changes: 58 additions & 18 deletions go/oasis-test-runner/scenario/e2e/runtime/txsource.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,10 +68,38 @@ var TxSourceMultiShort scenario.Scenario = &txSourceImpl{
consensusPruneDisabledProbability: 0.1,
consensusPruneMinKept: 100,
consensusPruneMaxKept: 200,
// XXX: use no more than 2 storage, 4 compute nodes as SGX E2E test
// instances cannot handle any more nodes than are currently configured.
numStorageNodes: 2,
numComputeNodes: 4,
numValidatorNodes: 4,
numKeyManagerNodes: 2,
numStorageNodes: 2,
numComputeNodes: 4,
}

// TxSourceMultiShortSGX uses multiple workloads for a short time.
// It is the SGX variant of the txsource-multi-short scenario, configured
// with fewer nodes because SGX CI instances have tighter resource limits.
var TxSourceMultiShortSGX scenario.Scenario = &txSourceImpl{
runtimeImpl: *newRuntimeImpl("txsource-multi-short-sgx", "", nil),
clientWorkloads: []string{
workload.NameCommission,
workload.NameDelegation,
workload.NameOversized,
workload.NameParallel,
workload.NameRegistration,
workload.NameRuntime,
workload.NameTransfer,
},
allNodeWorkloads: []string{
workload.NameQueries,
},
timeLimit: timeLimitShort,
livenessCheckInterval: livenessCheckInterval,
consensusPruneDisabledProbability: 0.1,
consensusPruneMinKept: 100,
consensusPruneMaxKept: 200,
// XXX: do not configure more nodes, as SGX E2E test instances cannot
// handle many more nodes than are currently configured.
numValidatorNodes: 3,
numKeyManagerNodes: 1,
numStorageNodes: 2,
numComputeNodes: 4,
}

// TxSourceMulti uses multiple workloads.
Expand Down Expand Up @@ -102,6 +130,12 @@ var TxSourceMulti scenario.Scenario = &txSourceImpl{
// node is restarted. Enable automatic corrupted WAL recovery for validator
// nodes.
tendermintRecoverCorruptedWAL: true,
// Use 4 validators so that consensus can keep making progress
// when a node is being killed and restarted.
numValidatorNodes: 4,
// Use 2 keymanagers so that at least one keymanager is accessible when
// the other one is being killed or shut down.
numKeyManagerNodes: 2,
// Use 4 storage nodes so runtime continues to work when one of the nodes
// is shut down.
numStorageNodes: 4,
Expand Down Expand Up @@ -133,6 +167,10 @@ type txSourceImpl struct { // nolint: maligned

enableCrashPoints bool

numValidatorNodes int
numKeyManagerNodes int
numComputeNodes int

// Configurable number of storage nodes. If running tests with long node
// shutdowns enabled, make sure this is at least `MinWriteReplication+1`,
// so that the runtime continues to work, even if one of the nodes is shut
Expand All @@ -142,9 +180,6 @@ type txSourceImpl struct { // nolint: maligned
// nodes in the short test variant.
numStorageNodes int

// Configurable number of compute nodes.
numComputeNodes int

rng *rand.Rand
seed string
}
Expand Down Expand Up @@ -336,14 +371,21 @@ func (sc *txSourceImpl) Fixture() (*oasis.NetworkFixture, error) {
f.Network.DefaultLogWatcherHandlerFactories = []log.WatcherHandlerFactory{}
}

// Use at least 4 validators so that consensus can keep making progress
// when a node is being killed and restarted.
f.Validators = []oasis.ValidatorFixture{
{Entity: 1},
{Entity: 1},
{Entity: 1},
{Entity: 1},
var validators []oasis.ValidatorFixture
for i := 0; i < sc.numValidatorNodes; i++ {
validators = append(validators, oasis.ValidatorFixture{
Entity: 1,
})
}
f.Validators = validators
var keymanagers []oasis.KeymanagerFixture
for i := 0; i < sc.numKeyManagerNodes; i++ {
keymanagers = append(keymanagers, oasis.KeymanagerFixture{
Runtime: 0,
Entity: 1,
})
}
f.Keymanagers = keymanagers
var computeWorkers []oasis.ComputeWorkerFixture
for i := 0; i < sc.numComputeNodes; i++ {
computeWorkers = append(computeWorkers, oasis.ComputeWorkerFixture{
Expand All @@ -352,10 +394,6 @@ func (sc *txSourceImpl) Fixture() (*oasis.NetworkFixture, error) {
})
}
f.ComputeWorkers = computeWorkers
f.Keymanagers = []oasis.KeymanagerFixture{
{Runtime: 0, Entity: 1},
{Runtime: 0, Entity: 1},
}
var storageWorkers []oasis.StorageWorkerFixture
for i := 0; i < sc.numStorageNodes; i++ {
storageWorkers = append(storageWorkers, oasis.StorageWorkerFixture{
Expand Down Expand Up @@ -672,6 +710,8 @@ func (sc *txSourceImpl) Clone() scenario.Scenario {
consensusPruneMaxKept: sc.consensusPruneMaxKept,
tendermintRecoverCorruptedWAL: sc.tendermintRecoverCorruptedWAL,
enableCrashPoints: sc.enableCrashPoints,
numValidatorNodes: sc.numValidatorNodes,
numKeyManagerNodes: sc.numKeyManagerNodes,
numStorageNodes: sc.numStorageNodes,
numComputeNodes: sc.numComputeNodes,
seed: sc.seed,
Expand Down

0 comments on commit c05d7be

Please sign in to comment.