From 165903d030f8b7acd24300790b9761ddb33d296b Mon Sep 17 00:00:00 2001
From: noot <36753753+noot@users.noreply.github.com>
Date: Fri, 15 Oct 2021 16:36:57 +0200
Subject: [PATCH] maintenance: cleanup TODOs, add issue numbers to remaining
TODOs (#1876)
---
cmd/gossamer/account_test.go | 126 +++--------
cmd/gossamer/config.go | 3 -
cmd/gossamer/config_test.go | 2 +-
cmd/gossamer/export.go | 2 -
cmd/gossamer/export_test.go | 4 +-
cmd/gossamer/main.go | 2 +
cmd/gossamer/main_test.go | 61 -----
cmd/gossamer/toml_config_test.go | 1 -
cmd/gossamer/utils.go | 2 -
docs/docs/usage/configuration.md | 6 +-
dot/build_spec.go | 7 +-
dot/config.go | 10 +-
dot/config_test.go | 2 -
dot/core/messages_test.go | 6 +-
dot/core/service.go | 34 ++-
dot/core/service_test.go | 69 ++----
dot/core/test_helpers.go | 2 +-
dot/digest/digest.go | 2 +-
dot/digest/digest_test.go | 65 ++----
dot/network/block_announce.go | 2 -
dot/network/connmgr.go | 12 +-
dot/network/host.go | 2 +-
dot/network/message.go | 3 +-
dot/network/notifications.go | 2 +-
dot/network/service.go | 10 +-
dot/network/state.go | 2 +-
dot/network/stream_manager_test.go | 2 +-
dot/network/sync.go | 10 +-
dot/network/test_helpers.go | 5 +-
dot/network/utils.go | 20 +-
dot/network/utils_test.go | 4 +-
dot/node.go | 2 +-
dot/node_test.go | 17 +-
dot/rpc/modules/api.go | 2 +-
dot/rpc/modules/author.go | 1 +
dot/rpc/modules/chain_test.go | 2 +-
dot/rpc/modules/grandpa.go | 2 +-
dot/rpc/modules/grandpa_test.go | 2 +-
dot/rpc/modules/mocks/storage_api.go | 6 +-
dot/rpc/modules/state.go | 52 +++--
dot/rpc/modules/state_test.go | 2 +-
dot/rpc/modules/system.go | 2 +-
dot/rpc/modules/system_test.go | 1 -
dot/rpc/subscription/websocket.go | 4 +-
dot/services.go | 7 +-
dot/services_test.go | 8 -
dot/state/block.go | 15 +-
dot/state/block_notify_test.go | 8 +-
dot/state/block_test.go | 20 +-
dot/state/initialize.go | 2 +-
dot/state/service.go | 2 +-
dot/state/service_test.go | 4 +-
dot/state/storage.go | 58 +++--
dot/state/storage_notify.go | 2 +-
dot/state/storage_test.go | 3 +-
dot/state/test_helpers.go | 23 +-
dot/sync/chain_processor.go | 4 +-
dot/sync/chain_sync.go | 19 +-
dot/sync/syncer_test.go | 2 +-
dot/sync/test_helpers.go | 3 +-
dot/sync/tip_syncer.go | 2 +-
dot/telemetry/telemetry.go | 2 +-
dot/types/babe.go | 2 +-
dot/types/extrinsic.go | 36 ++-
dot/utils.go | 10 +-
dot/utils_test.go | 13 --
lib/babe/babe.go | 70 ++----
lib/babe/babe_test.go | 29 +--
lib/babe/build_test.go | 21 +-
lib/babe/epoch.go | 3 +-
lib/babe/epoch_test.go | 2 +-
lib/babe/median.go | 171 --------------
lib/babe/median_test.go | 211 ------------------
lib/babe/verify_test.go | 100 +--------
lib/blocktree/blocktree.go | 42 ++--
lib/blocktree/blocktree_test.go | 32 +--
lib/blocktree/database.go | 8 +-
lib/blocktree/database_test.go | 2 +-
lib/blocktree/node_test.go | 4 +-
lib/common/well_known_keys.go | 26 ---
lib/genesis/helpers.go | 14 +-
lib/genesis/pallet.go | 6 +-
lib/genesis/test_utils.go | 20 +-
lib/grandpa/grandpa.go | 20 +-
lib/grandpa/grandpa_test.go | 16 +-
lib/grandpa/message_handler.go | 9 +-
lib/grandpa/message_tracker_test.go | 8 +-
lib/grandpa/round_test.go | 15 +-
lib/grandpa/state.go | 2 +-
lib/grandpa/types.go | 2 +-
lib/grandpa/vote_message.go | 2 +-
lib/grandpa/vote_message_test.go | 64 +++---
lib/keystore/helpers.go | 2 -
lib/keystore/keystore.go | 2 +-
.../runtime/common.go | 12 +-
lib/runtime/interface.go | 6 +-
lib/runtime/life/exports.go | 31 +--
lib/runtime/life/exports_test.go | 9 +-
lib/runtime/life/instance.go | 31 +--
lib/runtime/life/resolver.go | 38 ++--
lib/runtime/life/resolver_test.go | 26 ++-
lib/runtime/storage/trie.go | 31 ---
lib/runtime/wasmer/exports.go | 1 -
lib/runtime/wasmer/exports_test.go | 20 +-
lib/runtime/wasmer/imports.go | 43 ++--
lib/runtime/wasmer/imports_test.go | 6 +-
lib/runtime/wasmer/instance.go | 35 ++-
lib/runtime/wasmer/instance_test.go | 4 +-
lib/trie/database_test.go | 6 +-
lib/trie/proof_test.go | 2 +-
lib/trie/trie.go | 1 -
lib/utils/utils.go | 2 -
tests/stress/stress_test.go | 24 --
tests/utils/gossamer_utils.go | 3 -
114 files changed, 619 insertions(+), 1435 deletions(-)
delete mode 100644 lib/babe/median.go
delete mode 100644 lib/babe/median_test.go
rename dot/types/extrinsic_test.go => lib/runtime/common.go (68%)
diff --git a/cmd/gossamer/account_test.go b/cmd/gossamer/account_test.go
index 0b8656320a..98dbd44d9b 100644
--- a/cmd/gossamer/account_test.go
+++ b/cmd/gossamer/account_test.go
@@ -21,6 +21,8 @@ import (
"testing"
"github.com/ChainSafe/gossamer/lib/utils"
+
+ "github.com/stretchr/testify/require"
)
// TestAccountGenerate test "gossamer account --generate"
@@ -29,25 +31,18 @@ func TestAccountGenerate(t *testing.T) {
defer utils.RemoveTestDir(t)
directory := fmt.Sprintf("--basepath=%s", testDir)
err := app.Run([]string{"irrelevant", "account", directory, "--generate=true", "--password=false"})
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
+
ctx, err := newTestContext(
"Test gossamer account --generate",
[]string{"basepath", "generate"},
[]interface{}{testDir, "true"},
)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
command := accountCommand
err = command.Run(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- // TODO: check contents of data directory - improve cmd account tests
+ require.NoError(t, err)
}
// TestAccountGeneratePassword test "gossamer account --generate --password"
@@ -56,25 +51,18 @@ func TestAccountGeneratePassword(t *testing.T) {
defer utils.RemoveTestDir(t)
directory := fmt.Sprintf("--basepath=%s", testDir)
err := app.Run([]string{"irrelevant", "account", directory, "--generate=true", "--password=true"})
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
+
ctx, err := newTestContext(
"Test gossamer account --generate --password",
[]string{"basepath", "generate", "password"},
[]interface{}{testDir, "true", "1234"},
)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
command := accountCommand
err = command.Run(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- // TODO: check contents of data directory - improve cmd account tests
+ require.NoError(t, err)
}
// TestAccountGenerateEd25519 test "gossamer account --generate --ed25519"
@@ -83,25 +71,18 @@ func TestAccountGenerateEd25519(t *testing.T) {
defer utils.RemoveTestDir(t)
directory := fmt.Sprintf("--basepath=%s", testDir)
err := app.Run([]string{"irrelevant", "account", directory, "--generate=true", "--password=false", "--ed25519"})
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
+
ctx, err := newTestContext(
"Test gossamer account --generate --ed25519",
[]string{"basepath", "generate", "ed25519"},
[]interface{}{testDir, "true", "ed25519"},
)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
command := accountCommand
err = command.Run(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- // TODO: check contents of data directory - improve cmd account tests
+ require.NoError(t, err)
}
// TestAccountGenerateSr25519 test "gossamer account --generate --ed25519"
@@ -110,25 +91,18 @@ func TestAccountGenerateSr25519(t *testing.T) {
defer utils.RemoveTestDir(t)
directory := fmt.Sprintf("--basepath=%s", testDir)
err := app.Run([]string{"irrelevant", "account", directory, "--generate=true", "--password=false", "--sr25519"})
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
+
ctx, err := newTestContext(
"Test gossamer account --generate --sr25519",
[]string{"basepath", "generate", "sr25519"},
[]interface{}{testDir, "true", "sr25519"},
)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
command := accountCommand
err = command.Run(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- // TODO: check contents of data directory - improve cmd account tests
+ require.NoError(t, err)
}
// TestAccountGenerateSecp256k1 test "gossamer account --generate --ed25519"
@@ -137,25 +111,18 @@ func TestAccountGenerateSecp256k1(t *testing.T) {
defer utils.RemoveTestDir(t)
directory := fmt.Sprintf("--basepath=%s", testDir)
err := app.Run([]string{"irrelevant", "account", directory, "--generate=true", "--password=false", "--secp256k1"})
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
+
ctx, err := newTestContext(
"Test gossamer account --generate --secp256k1",
[]string{"basepath", "generate", "secp256k1"},
[]interface{}{testDir, "true", "secp256k1"},
)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
command := accountCommand
err = command.Run(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- // TODO: check contents of data directory - improve cmd account tests
+ require.NoError(t, err)
}
// TestAccountImport test "gossamer account --import"
@@ -163,27 +130,20 @@ func TestAccountImport(t *testing.T) {
testDir := utils.NewTestDir(t)
defer utils.RemoveTestDir(t)
directory := fmt.Sprintf("--basepath=%s", testDir)
- // TODO: Add import value for test
+
err := app.Run([]string{"irrelevant", "account", directory, "--import=./test_inputs/test-key.key"})
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
+
ctx, err := newTestContext(
"Test gossamer account --import=./test_inputs/test-key.key",
[]string{"basepath", "import"},
[]interface{}{"./test_inputs/", "test-key.key"},
)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
command := accountCommand
err = command.Run(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- // TODO: check contents of data directory - improve cmd account tests
+ require.NoError(t, err)
}
// TestAccountImport test "gossamer account --import-raw"
@@ -191,27 +151,20 @@ func TestAccountImportRaw(t *testing.T) {
testDir := utils.NewTestDir(t)
defer utils.RemoveTestDir(t)
directory := fmt.Sprintf("--basepath=%s", testDir)
- // TODO: Add import-raw value for test
+
err := app.Run([]string{"irrelevant", "account", directory, `--import-raw=0x33a6f3093f158a7109f679410bef1a0c54168145e0cecb4df006c1c2fffb1f09`, "--password=1234"})
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
+
ctx, err := newTestContext(
"Test gossamer account --import-raw=0x33a6f3093f158a7109f679410bef1a0c54168145e0cecb4df006c1c2fffb1f09 --password=1234",
[]string{"import-raw", "password"},
[]interface{}{"0x33a6f3093f158a7109f679410bef1a0c54168145e0cecb4df006c1c2fffb1f09", "1234"},
)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
command := accountCommand
err = command.Run(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- // TODO: check contents of data directory - improve cmd account tests
+ require.NoError(t, err)
}
// TestAccountList test "gossamer account --list"
@@ -220,23 +173,16 @@ func TestAccountList(t *testing.T) {
defer utils.RemoveTestDir(t)
directory := fmt.Sprintf("--basepath=%s", testDir)
err := app.Run([]string{"irrelevant", "account", directory, "--list"})
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
+
ctx, err := newTestContext(
"Test gossamer account --list",
[]string{"basepath", "list"},
[]interface{}{testDir, "true"},
)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
command := accountCommand
err = command.Run(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- // TODO: check contents of data directory - improve cmd account tests
+ require.NoError(t, err)
}
diff --git a/cmd/gossamer/config.go b/cmd/gossamer/config.go
index 4d9f1bdb8a..647ffade5c 100644
--- a/cmd/gossamer/config.go
+++ b/cmd/gossamer/config.go
@@ -555,8 +555,6 @@ func setDotCoreConfig(ctx *cli.Context, tomlCfg ctoml.CoreConfig, cfg *dot.CoreC
cfg.Roles = tomlCfg.Roles
cfg.BabeAuthority = tomlCfg.Roles == types.AuthorityRole
cfg.GrandpaAuthority = tomlCfg.Roles == types.AuthorityRole
- cfg.SlotDuration = tomlCfg.SlotDuration
- cfg.EpochLength = tomlCfg.EpochLength
// check --roles flag and update node configuration
if roles := ctx.GlobalString(RolesFlag.Name); roles != "" {
@@ -609,7 +607,6 @@ func setDotCoreConfig(ctx *cli.Context, tomlCfg ctoml.CoreConfig, cfg *dot.CoreC
"core configuration",
"babe-authority", cfg.BabeAuthority,
"grandpa-authority", cfg.GrandpaAuthority,
- "epoch-length", cfg.EpochLength,
"wasm-interpreter", cfg.WasmInterpreter,
)
}
diff --git a/cmd/gossamer/config_test.go b/cmd/gossamer/config_test.go
index c308feb09a..010aeee3c9 100644
--- a/cmd/gossamer/config_test.go
+++ b/cmd/gossamer/config_test.go
@@ -811,7 +811,7 @@ func TestUpdateConfigFromGenesisData(t *testing.T) {
Core: testCfg.Core,
Network: dot.NetworkConfig{
Port: testCfg.Network.Port,
- Bootnodes: []string{}, // TODO: improve cmd tests #687
+ Bootnodes: []string{},
ProtocolID: testCfg.Network.ProtocolID,
NoBootstrap: testCfg.Network.NoBootstrap,
NoMDNS: testCfg.Network.NoMDNS,
diff --git a/cmd/gossamer/export.go b/cmd/gossamer/export.go
index 07c0a2a95e..6d8fbe3a3a 100644
--- a/cmd/gossamer/export.go
+++ b/cmd/gossamer/export.go
@@ -113,8 +113,6 @@ func dotConfigToToml(dcfg *dot.Config) *ctoml.Config {
Roles: dcfg.Core.Roles,
BabeAuthority: dcfg.Core.BabeAuthority,
GrandpaAuthority: dcfg.Core.GrandpaAuthority,
- EpochLength: dcfg.Core.EpochLength,
- SlotDuration: dcfg.Core.SlotDuration,
}
cfg.Network = ctoml.NetworkConfig{
diff --git a/cmd/gossamer/export_test.go b/cmd/gossamer/export_test.go
index 2371638d82..4b999e473d 100644
--- a/cmd/gossamer/export_test.go
+++ b/cmd/gossamer/export_test.go
@@ -82,7 +82,7 @@ func TestExportCommand(t *testing.T) {
Core: testCfg.Core,
Network: dot.NetworkConfig{
Port: testCfg.Network.Port,
- Bootnodes: testCfg.Network.Bootnodes, // TODO: improve cmd tests #687
+ Bootnodes: testCfg.Network.Bootnodes,
ProtocolID: testCfg.Network.ProtocolID,
NoBootstrap: testCfg.Network.NoBootstrap,
NoMDNS: testCfg.Network.NoMDNS,
@@ -146,7 +146,7 @@ func TestExportCommand(t *testing.T) {
Core: testCfg.Core,
Network: dot.NetworkConfig{
Port: testCfg.Network.Port,
- Bootnodes: []string{testBootnode}, // TODO: improve cmd tests #687
+ Bootnodes: []string{testBootnode},
ProtocolID: testProtocol,
NoBootstrap: testCfg.Network.NoBootstrap,
NoMDNS: testCfg.Network.NoMDNS,
diff --git a/cmd/gossamer/main.go b/cmd/gossamer/main.go
index e09ba9f9ed..ea4b31194e 100644
--- a/cmd/gossamer/main.go
+++ b/cmd/gossamer/main.go
@@ -258,6 +258,7 @@ func gossamerAction(ctx *cli.Context) error {
}
ks := keystore.NewGlobalKeystore()
+ // load built-in test keys if specified by `cfg.Account.Key`
err = keystore.LoadKeystore(cfg.Account.Key, ks.Acco)
if err != nil {
logger.Error("failed to load account keystore", "error", err)
@@ -276,6 +277,7 @@ func gossamerAction(ctx *cli.Context) error {
return err
}
+ // load user keys if specified
err = unlockKeystore(ks.Acco, cfg.Global.BasePath, cfg.Account.Unlock, ctx.String(PasswordFlag.Name))
if err != nil {
logger.Error("failed to unlock keystore", "error", err)
diff --git a/cmd/gossamer/main_test.go b/cmd/gossamer/main_test.go
index 3d7ae8fdb3..11565efcdd 100644
--- a/cmd/gossamer/main_test.go
+++ b/cmd/gossamer/main_test.go
@@ -22,10 +22,8 @@ import (
"fmt"
"io"
"io/ioutil"
- "log"
"os"
"os/exec"
- "strconv"
"sync"
"syscall"
"testing"
@@ -232,59 +230,6 @@ func TestInvalidCommand(t *testing.T) {
}
}
-func TestGossamerCommand(t *testing.T) {
- t.Skip() // TODO: not sure how relevant this is anymore, it also slows down the tests a lot
-
- basePort := 7000
- genesisPath := utils.GetGssmrGenesisRawPath()
-
- tempDir, err := ioutil.TempDir("", "gossamer-maintest-")
- require.Nil(t, err)
-
- gossamer := runTestGossamer(t,
- "init",
- "--basepath", tempDir,
- "--genesis", genesisPath,
- "--force",
- )
-
- stdout, stderr := gossamer.GetOutput()
- t.Log("init gossamer output, ", "stdout", string(stdout), "stderr", string(stderr))
-
- expectedMessages := []string{
- "node initialised",
- }
-
- for _, m := range expectedMessages {
- require.Contains(t, string(stdout), m)
- }
-
- for i := 0; i < 10; i++ {
- t.Log("Going to gossamer cmd", "iteration", i)
-
- // start
- gossamer = runTestGossamer(t,
- "--port", strconv.Itoa(basePort),
- "--key", "alice",
- "--basepath", tempDir,
- "--roles", "4",
- )
-
- time.Sleep(10 * time.Second)
-
- stdout, stderr = gossamer.GetOutput()
- log.Println("Run gossamer output, ", "stdout", string(stdout), "stderr", string(stderr))
-
- expectedMessages = []string{
- "SIGABRT: abort",
- }
-
- for _, m := range expectedMessages {
- require.NotContains(t, string(stderr), m)
- }
- }
-}
-
func TestInitCommand_RenameNodeWhenCalled(t *testing.T) {
genesisPath := utils.GetGssmrGenesisRawPath()
@@ -344,9 +289,3 @@ func TestBuildSpecCommandWithOutput(t *testing.T) {
require.Empty(t, outb)
require.Empty(t, errb)
}
-
-// TODO: TestExportCommand test "gossamer export" does not error
-
-// TODO: TestInitCommand test "gossamer init" does not error
-
-// TODO: TestAccountCommand test "gossamer account" does not error
diff --git a/cmd/gossamer/toml_config_test.go b/cmd/gossamer/toml_config_test.go
index f7bb047d08..ed59028e04 100644
--- a/cmd/gossamer/toml_config_test.go
+++ b/cmd/gossamer/toml_config_test.go
@@ -54,7 +54,6 @@ func TestLoadConfigGssmr(t *testing.T) {
}
func TestLoadConfigKusama(t *testing.T) {
- t.Skip() // TODO: fix by updating kusama runtime
cfg := dot.KusamaConfig()
require.NotNil(t, cfg)
diff --git a/cmd/gossamer/utils.go b/cmd/gossamer/utils.go
index d83a4045d2..854b2c0197 100644
--- a/cmd/gossamer/utils.go
+++ b/cmd/gossamer/utils.go
@@ -85,8 +85,6 @@ func confirmMessage(msg string) bool {
func newTestConfig(t *testing.T) *dot.Config {
dir := utils.NewTestDir(t)
- // TODO: use default config instead of gssmr config for test config #776
-
cfg := &dot.Config{
Global: dot.GlobalConfig{
Name: dot.GssmrConfig().Global.Name,
diff --git a/docs/docs/usage/configuration.md b/docs/docs/usage/configuration.md
index 57979672c6..664b745f1e 100644
--- a/docs/docs/usage/configuration.md
+++ b/docs/docs/usage/configuration.md
@@ -12,10 +12,10 @@ Gossamer consumes a `.toml` file containing predefined settings for the node fro
```toml
[global]
-basepath = "~/.gossamer/gssmr" // TODO: confirm
+basepath = "~/.gossamer/gssmr"
log = " | trace | debug | info | warn | error | crit"
-cpuprof = "~/cpuprof.txt" // TODO: Syntax?
-memprof = "~/memprof.txt" // TODO: Syntax?
+cpuprof = "~/cpuprof.out"
+memprof = "~/memprof.out"
name = "gssmr"
[log]
diff --git a/dot/build_spec.go b/dot/build_spec.go
index 7abe4a2727..f29fcce8f4 100644
--- a/dot/build_spec.go
+++ b/dot/build_spec.go
@@ -142,10 +142,13 @@ func BuildFromDB(path string) (*BuildSpec, error) {
}
tmpGen.Name = gData.Name
tmpGen.ID = gData.ID
- // todo figure out how to assign bootnodes (see issue #1030)
- //tmpGen.Bootnodes = gData.(*genesis.Data).Bootnodes
+ tmpGen.Bootnodes = make([]string, len(gData.Bootnodes))
tmpGen.ProtocolID = gData.ProtocolID
+ for i, bn := range gData.Bootnodes {
+ tmpGen.Bootnodes[i] = string(bn)
+ }
+
bs := &BuildSpec{
genesis: tmpGen,
}
diff --git a/dot/config.go b/dot/config.go
index 048f522e13..0a5f2462dc 100644
--- a/dot/config.go
+++ b/dot/config.go
@@ -29,8 +29,8 @@ import (
log "github.com/ChainSafe/log15"
)
-// TODO: create separate types for toml config and internal config, needed since we don't want to expose all
-// the internal config options, also type conversions might be needed from toml -> internal types
+// TODO: update config to have toml rules and perhaps un-export some fields, since we don't want to expose all
+// the internal config options, also type conversions might be needed from toml -> internal types (#1848)
// Config is a collection of configurations throughout the system
type Config struct {
@@ -77,8 +77,8 @@ type InitConfig struct {
// AccountConfig is to marshal/unmarshal account config vars
type AccountConfig struct {
- Key string // TODO: change to array
- Unlock string // TODO: change to array
+ Key string
+ Unlock string // TODO: change to []int (#1849)
}
// NetworkConfig is to marshal/unmarshal toml network config vars
@@ -99,8 +99,6 @@ type CoreConfig struct {
Roles byte
BabeAuthority bool
GrandpaAuthority bool
- SlotDuration uint64
- EpochLength uint64
WasmInterpreter string
}
diff --git a/dot/config_test.go b/dot/config_test.go
index 194a1cbf38..d029c331f6 100644
--- a/dot/config_test.go
+++ b/dot/config_test.go
@@ -40,7 +40,5 @@ func TestExportConfig(t *testing.T) {
require.Nil(t, err)
file := ExportConfig(cfg, cfgFile.Name())
-
- // TODO: improve dot tests #687
require.NotNil(t, file)
}
diff --git a/dot/core/messages_test.go b/dot/core/messages_test.go
index 8f6e244019..53354bb989 100644
--- a/dot/core/messages_test.go
+++ b/dot/core/messages_test.go
@@ -79,10 +79,8 @@ func createExtrinsic(t *testing.T, rt runtime.Instance, genHash common.Hash, non
return extBytes
}
-func TestService_ProcessBlockAnnounceMessage(t *testing.T) {
- // TODO: move to sync package
- net := new(MockNetwork) // nolint
-
+func TestService_HandleBlockProduced(t *testing.T) {
+ net := new(MockNetwork)
cfg := &Config{
Network: net,
Keystore: keystore.NewGlobalKeystore(),
diff --git a/dot/core/service.go b/dot/core/service.go
index 83feefb4a5..c0abf48c80 100644
--- a/dot/core/service.go
+++ b/dot/core/service.go
@@ -30,6 +30,7 @@ import (
"github.com/ChainSafe/gossamer/lib/keystore"
"github.com/ChainSafe/gossamer/lib/runtime"
rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage"
+ "github.com/ChainSafe/gossamer/lib/runtime/wasmer"
"github.com/ChainSafe/gossamer/lib/services"
"github.com/ChainSafe/gossamer/lib/transaction"
"github.com/ChainSafe/gossamer/pkg/scale"
@@ -242,7 +243,7 @@ func (s *Service) handleBlock(block *types.Block, state *rtstorage.TrieState) er
}
// check if there was a runtime code substitution
- if err := s.handleCodeSubstitution(block.Header.Hash()); err != nil {
+ if err := s.handleCodeSubstitution(block.Header.Hash(), state); err != nil {
logger.Crit("failed to substitute runtime code", "error", err)
return err
}
@@ -266,7 +267,7 @@ func (s *Service) handleBlock(block *types.Block, state *rtstorage.TrieState) er
return nil
}
-func (s *Service) handleCodeSubstitution(hash common.Hash) error {
+func (s *Service) handleCodeSubstitution(hash common.Hash, state *rtstorage.TrieState) error {
value := s.codeSubstitute[hash]
if value == "" {
return nil
@@ -283,9 +284,22 @@ func (s *Service) handleCodeSubstitution(hash common.Hash) error {
return err
}
- // TODO: this needs to create a new runtime instance, otherwise it will update
+ // this needs to create a new runtime instance, otherwise it will update
// the blocks that reference the current runtime version to use the code substitution
- err = rt.UpdateRuntimeCode(code)
+ cfg := &wasmer.Config{
+ Imports: wasmer.ImportsNodeRuntime,
+ }
+
+ cfg.Storage = state
+ cfg.Keystore = rt.Keystore()
+ cfg.NodeStorage = rt.NodeStorage()
+ cfg.Network = rt.NetworkService()
+
+ if rt.Validator() {
+ cfg.Role = 4
+ }
+
+ next, err := wasmer.NewInstance(code, cfg)
if err != nil {
return err
}
@@ -295,6 +309,7 @@ func (s *Service) handleCodeSubstitution(hash common.Hash) error {
return err
}
+ s.blockState.StoreRuntime(hash, next)
return nil
}
@@ -383,15 +398,14 @@ func (s *Service) handleChainReorg(prev, curr common.Hash) error {
continue
}
- // TODO: decode extrinsic and make sure it's not an inherent.
- // currently we are attempting to re-add inherents, causing lots of "'Bad input data provided to validate_transaction" errors.
for _, ext := range *body {
- logger.Debug("validating transaction on re-org chain", "extrinsic", ext)
+ logger.Info("validating transaction on re-org chain", "extrinsic", ext)
encExt, err := scale.Marshal(ext)
if err != nil {
return err
}
+ // decode extrinsic and make sure it's not an inherent.
decExt := &types.ExtrinsicData{}
err = decExt.DecodeVersion(encExt)
if err != nil {
@@ -406,7 +420,7 @@ func (s *Service) handleChainReorg(prev, curr common.Hash) error {
externalExt := types.Extrinsic(append([]byte{byte(types.TxnExternal)}, encExt...))
txv, err := rt.ValidateTransaction(externalExt)
if err != nil {
- logger.Debug("failed to validate transaction", "error", err, "extrinsic", ext)
+ logger.Info("failed to validate transaction", "error", err, "extrinsic", ext)
continue
}
@@ -430,7 +444,7 @@ func (s *Service) maintainTransactionPool(block *types.Block) {
// re-validate transactions in the pool and move them to the queue
txs := s.transactionState.PendingInPool()
for _, tx := range txs {
- // TODO: re-add this
+ // TODO: re-add this, need to update tests (#904)
// val, err := s.rt.ValidateTransaction(tx.Extrinsic)
// if err != nil {
// // failed to validate tx, remove it from the pool or queue
@@ -453,7 +467,7 @@ func (s *Service) maintainTransactionPool(block *types.Block) {
}
// InsertKey inserts keypair into the account keystore
-// TODO: define which keystores need to be updated and create separate insert funcs for each
+// TODO: define which keystores need to be updated and create separate insert funcs for each (#1850)
func (s *Service) InsertKey(kp crypto.Keypair) {
s.keys.Acco.Insert(kp)
}
diff --git a/dot/core/service_test.go b/dot/core/service_test.go
index 965aa6a5d7..9ce906c5d7 100644
--- a/dot/core/service_test.go
+++ b/dot/core/service_test.go
@@ -34,7 +34,7 @@ import (
"github.com/ChainSafe/gossamer/lib/keystore"
"github.com/ChainSafe/gossamer/lib/runtime"
runtimemocks "github.com/ChainSafe/gossamer/lib/runtime/mocks"
- "github.com/ChainSafe/gossamer/lib/runtime/storage"
+ rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage"
"github.com/ChainSafe/gossamer/lib/runtime/wasmer"
"github.com/ChainSafe/gossamer/lib/transaction"
"github.com/ChainSafe/gossamer/lib/trie"
@@ -44,40 +44,6 @@ import (
"github.com/stretchr/testify/require"
)
-func addTestBlocksToState(t *testing.T, depth int, blockState BlockState) {
- _ = addTestBlocksToStateWithParent(t, blockState.BestBlockHash(), depth, blockState)
-}
-
-func addTestBlocksToStateWithParent(t *testing.T, previousHash common.Hash, depth int, blockState BlockState) []*types.Header {
- prevHeader, err := blockState.(*state.BlockState).GetHeader(previousHash)
- require.NoError(t, err)
- previousNum := prevHeader.Number
-
- var headers []*types.Header
- rt, err := blockState.GetRuntime(nil)
- require.NoError(t, err)
-
- for i := 1; i <= depth; i++ {
- block := &types.Block{
- Header: types.Header{
- ParentHash: previousHash,
- Number: big.NewInt(int64(i)).Add(previousNum, big.NewInt(int64(i))),
- Digest: types.NewDigest(),
- },
- Body: types.Body{},
- }
-
- previousHash = block.Header.Hash()
-
- blockState.StoreRuntime(block.Header.Hash(), rt)
- err := blockState.AddBlock(block)
- require.NoError(t, err)
- headers = append(headers, &block.Header)
- }
-
- return headers
-}
-
func TestMain(m *testing.M) {
wasmFilePaths, err := runtime.GenerateRuntimeWasmFile()
if err != nil {
@@ -94,12 +60,10 @@ func TestMain(m *testing.M) {
func TestStartService(t *testing.T) {
s := NewTestService(t, nil)
-
- // TODO: improve dot tests #687
require.NotNil(t, s)
err := s.Start()
- require.Nil(t, err)
+ require.NoError(t, err)
err = s.Stop()
require.NoError(t, err)
@@ -185,7 +149,7 @@ func TestService_HasKey_UnknownType(t *testing.T) {
func TestHandleChainReorg_NoReorg(t *testing.T) {
s := NewTestService(t, nil)
- addTestBlocksToState(t, 4, s.blockState.(*state.BlockState))
+ state.AddBlocksToState(t, s.blockState.(*state.BlockState), 4, false)
head, err := s.blockState.BestBlockHeader()
require.NoError(t, err)
@@ -195,8 +159,8 @@ func TestHandleChainReorg_NoReorg(t *testing.T) {
}
func TestHandleChainReorg_WithReorg_Trans(t *testing.T) {
+ t.Skip() // TODO: tx fails to validate in handleChainReorg() with "Invalid transaction" (#1026)
s := NewTestService(t, nil)
-
bs := s.blockState
parent, err := bs.BestBlockHeader()
@@ -235,7 +199,7 @@ func TestHandleChainReorg_WithReorg_Trans(t *testing.T) {
err = bs.AddBlock(block31)
require.NoError(t, err)
- nonce := uint64(1)
+ nonce := uint64(0)
// Add extrinsic to block `block31`
ext := createExtrinsic(t, rt, bs.GenesisHash(), nonce)
@@ -284,7 +248,7 @@ func TestHandleChainReorg_WithReorg_Transactions(t *testing.T) {
s := NewTestService(t, cfg)
height := 5
branch := 3
- addTestBlocksToState(t, height, s.blockState.(*state.BlockState))
+ state.AddBlocksToState(t, s.blockState.(*state.BlockState), height, false)
// create extrinsic
enc, err := scale.Marshal([]byte("nootwashere"))
@@ -339,7 +303,7 @@ func TestHandleChainReorg_WithReorg_Transactions(t *testing.T) {
}
func TestMaintainTransactionPool_EmptyBlock(t *testing.T) {
- // TODO" update these to real extrinsics on update to v0.8
+ // TODO: update these to real extrinsics on update to v0.9 (#904)
txs := []*transaction.ValidTransaction{
{
Extrinsic: []byte("a"),
@@ -561,7 +525,6 @@ func TestService_HandleRuntimeChanges(t *testing.T) {
}
func TestService_HandleCodeSubstitutes(t *testing.T) {
- t.Skip() // fix this, fails on CI
s := NewTestService(t, nil)
testRuntime, err := ioutil.ReadFile(runtime.POLKADOT_RUNTIME_FP)
@@ -577,7 +540,10 @@ func TestService_HandleCodeSubstitutes(t *testing.T) {
s.blockState.StoreRuntime(blockHash, rt)
- err = s.handleCodeSubstitution(blockHash)
+ ts, err := rtstorage.NewTrieState(trie.NewEmptyTrie())
+ require.NoError(t, err)
+
+ err = s.handleCodeSubstitution(blockHash, ts)
require.NoError(t, err)
codSub := s.codeSubstitutedState.LoadCodeSubstitutedBlockHash()
require.Equal(t, blockHash, codSub)
@@ -602,14 +568,17 @@ func TestService_HandleRuntimeChangesAfterCodeSubstitutes(t *testing.T) {
Body: *body,
}
- err = s.handleCodeSubstitution(blockHash)
+ ts, err := rtstorage.NewTrieState(trie.NewEmptyTrie())
+ require.NoError(t, err)
+
+ err = s.handleCodeSubstitution(blockHash, ts)
require.NoError(t, err)
require.Equal(t, codeHashBefore, parentRt.GetCodeHash()) // codeHash should remain unchanged after code substitute
testRuntime, err := ioutil.ReadFile(runtime.POLKADOT_RUNTIME_FP)
require.NoError(t, err)
- ts, err := s.storageState.TrieState(nil)
+ ts, err = s.storageState.TrieState(nil)
require.NoError(t, err)
ts.Set(common.CodeKey, testRuntime)
@@ -627,7 +596,7 @@ func TestService_HandleRuntimeChangesAfterCodeSubstitutes(t *testing.T) {
func TestTryQueryStore_WhenThereIsDataToRetrieve(t *testing.T) {
s := NewTestService(t, nil)
- storageStateTrie, err := storage.NewTrieState(trie.NewTrie(nil))
+ storageStateTrie, err := rtstorage.NewTrieState(trie.NewTrie(nil))
testKey, testValue := []byte("to"), []byte("0x1723712318238AB12312")
storageStateTrie.Set(testKey, testValue)
@@ -660,7 +629,7 @@ func TestTryQueryStore_WhenThereIsDataToRetrieve(t *testing.T) {
func TestTryQueryStore_WhenDoesNotHaveDataToRetrieve(t *testing.T) {
s := NewTestService(t, nil)
- storageStateTrie, err := storage.NewTrieState(trie.NewTrie(nil))
+ storageStateTrie, err := rtstorage.NewTrieState(trie.NewTrie(nil))
require.NoError(t, err)
header, err := types.NewHeader(s.blockState.GenesisHash(), storageStateTrie.MustRoot(),
@@ -770,7 +739,7 @@ func TestQueryStorate_WhenBlocksHasData(t *testing.T) {
func createNewBlockAndStoreDataAtBlock(t *testing.T, s *Service, key, value []byte, parentHash common.Hash, number int64) *types.Block {
t.Helper()
- storageStateTrie, err := storage.NewTrieState(trie.NewTrie(nil))
+ storageStateTrie, err := rtstorage.NewTrieState(trie.NewTrie(nil))
storageStateTrie.Set(key, value)
require.NoError(t, err)
diff --git a/dot/core/test_helpers.go b/dot/core/test_helpers.go
index a0d2ff0f6f..aad7aaa89a 100644
--- a/dot/core/test_helpers.go
+++ b/dot/core/test_helpers.go
@@ -118,7 +118,7 @@ func NewTestService(t *testing.T, cfg *Config) *Service {
rtCfg.NodeStorage = nodeStorage
- cfg.Runtime, err = wasmer.NewRuntimeFromGenesis(gen, rtCfg)
+ cfg.Runtime, err = wasmer.NewRuntimeFromGenesis(rtCfg)
require.NoError(t, err)
}
cfg.BlockState.StoreRuntime(cfg.BlockState.BestBlockHash(), cfg.Runtime)
diff --git a/dot/digest/digest.go b/dot/digest/digest.go
index ca87ac61a5..d9b8a2745e 100644
--- a/dot/digest/digest.go
+++ b/dot/digest/digest.go
@@ -33,7 +33,7 @@ var maxUint64 = uint64(math.MaxUint64)
var (
_ services.Service = &Handler{}
- logger log.Logger = log.New("pkg", "digest") // TODO: add to config options
+ logger log.Logger = log.New("pkg", "digest") // TODO: add to config options (#1851)
)
// Handler is used to handle consensus messages and relevant authority updates to BABE and GRANDPA
diff --git a/dot/digest/digest_test.go b/dot/digest/digest_test.go
index 39ac8896cb..86e4301dc3 100644
--- a/dot/digest/digest_test.go
+++ b/dot/digest/digest_test.go
@@ -35,42 +35,7 @@ import (
"github.com/stretchr/testify/require"
)
-// TODO: use these from core?
-func addTestBlocksToState(t *testing.T, depth int, blockState BlockState) []*types.Header {
- return addTestBlocksToStateWithParent(t, blockState.(*state.BlockState).BestBlockHash(), depth, blockState)
-}
-
-func addTestBlocksToStateWithParent(t *testing.T, previousHash common.Hash, depth int, blockState BlockState) []*types.Header {
- prevHeader, err := blockState.(*state.BlockState).GetHeader(previousHash)
- require.NoError(t, err)
- previousNum := prevHeader.Number
-
- headers := []*types.Header{}
-
- for i := 1; i <= depth; i++ {
- digest := types.NewDigest()
- err = digest.Add(*types.NewBabeSecondaryPlainPreDigest(0, uint64(i)).ToPreRuntimeDigest())
- require.NoError(t, err)
-
- block := &types.Block{
- Header: types.Header{
- ParentHash: previousHash,
- Number: big.NewInt(int64(i)).Add(previousNum, big.NewInt(int64(i))),
- Digest: digest,
- },
- Body: types.Body{},
- }
-
- previousHash = block.Header.Hash()
- err = blockState.(*state.BlockState).AddBlock(block)
- require.NoError(t, err)
- headers = append(headers, &block.Header)
- }
-
- return headers
-}
-
-func newTestHandler(t *testing.T, withBABE, withGrandpa bool) *Handler { //nolint
+func newTestHandler(t *testing.T) *Handler {
testDatadirPath, err := ioutil.TempDir("/tmp", "test-datadir-*")
require.NoError(t, err)
@@ -94,7 +59,7 @@ func newTestHandler(t *testing.T, withBABE, withGrandpa bool) *Handler { //nolin
}
func TestHandler_GrandpaScheduledChange(t *testing.T) {
- handler := newTestHandler(t, false, true)
+ handler := newTestHandler(t)
handler.Start()
defer handler.Stop()
@@ -127,13 +92,13 @@ func TestHandler_GrandpaScheduledChange(t *testing.T) {
err = handler.handleConsensusDigest(d, header)
require.NoError(t, err)
- headers := addTestBlocksToState(t, 2, handler.blockState)
+ headers, _ := state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 2, false)
for i, h := range headers {
handler.blockState.(*state.BlockState).SetFinalisedHash(h.Hash(), uint64(i), 0)
}
// authorities should change on start of block 3 from start
- headers = addTestBlocksToState(t, 1, handler.blockState)
+ headers, _ = state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 1, false)
for _, h := range headers {
handler.blockState.(*state.BlockState).SetFinalisedHash(h.Hash(), 3, 0)
}
@@ -151,7 +116,7 @@ func TestHandler_GrandpaScheduledChange(t *testing.T) {
}
func TestHandler_GrandpaForcedChange(t *testing.T) {
- handler := newTestHandler(t, false, true)
+ handler := newTestHandler(t)
handler.Start()
defer handler.Stop()
@@ -184,10 +149,8 @@ func TestHandler_GrandpaForcedChange(t *testing.T) {
err = handler.handleConsensusDigest(d, header)
require.NoError(t, err)
- addTestBlocksToState(t, 3, handler.blockState)
-
// authorities should change on start of block 4 from start
- addTestBlocksToState(t, 1, handler.blockState)
+ state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 4, false)
time.Sleep(time.Millisecond * 100)
setID, err := handler.grandpaState.(*state.GrandpaState).GetCurrentSetID()
@@ -202,7 +165,7 @@ func TestHandler_GrandpaForcedChange(t *testing.T) {
}
func TestHandler_GrandpaPauseAndResume(t *testing.T) {
- handler := newTestHandler(t, false, true)
+ handler := newTestHandler(t)
handler.Start()
defer handler.Stop()
@@ -228,7 +191,7 @@ func TestHandler_GrandpaPauseAndResume(t *testing.T) {
require.NoError(t, err)
require.Equal(t, big.NewInt(int64(p.Delay)), nextPause)
- headers := addTestBlocksToState(t, 3, handler.blockState)
+ headers, _ := state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 3, false)
for i, h := range headers {
handler.blockState.(*state.BlockState).SetFinalisedHash(h.Hash(), uint64(i), 0)
}
@@ -255,7 +218,7 @@ func TestHandler_GrandpaPauseAndResume(t *testing.T) {
err = handler.handleConsensusDigest(d, nil)
require.NoError(t, err)
- addTestBlocksToState(t, 3, handler.blockState)
+ state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 3, false)
time.Sleep(time.Millisecond * 110)
require.Nil(t, handler.grandpaResume)
@@ -265,7 +228,7 @@ func TestHandler_GrandpaPauseAndResume(t *testing.T) {
}
func TestNextGrandpaAuthorityChange_OneChange(t *testing.T) {
- handler := newTestHandler(t, false, true)
+ handler := newTestHandler(t)
handler.Start()
defer handler.Stop()
@@ -305,7 +268,7 @@ func TestNextGrandpaAuthorityChange_OneChange(t *testing.T) {
}
func TestNextGrandpaAuthorityChange_MultipleChanges(t *testing.T) {
- handler := newTestHandler(t, false, true)
+ handler := newTestHandler(t)
handler.Start()
defer handler.Stop()
@@ -378,7 +341,7 @@ func TestNextGrandpaAuthorityChange_MultipleChanges(t *testing.T) {
}
func TestHandler_HandleBABEOnDisabled(t *testing.T) {
- handler := newTestHandler(t, true, false)
+ handler := newTestHandler(t)
header := &types.Header{
Number: big.NewInt(1),
}
@@ -420,7 +383,7 @@ func createHeaderWithPreDigest(t *testing.T, slotNumber uint64) *types.Header {
func TestHandler_HandleNextEpochData(t *testing.T) {
expData := common.MustHexToBytes("0x0108d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d01000000000000008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a4801000000000000004d58630000000000000000000000000000000000000000000000000000000000")
- handler := newTestHandler(t, true, false)
+ handler := newTestHandler(t)
handler.Start()
defer handler.Stop()
@@ -473,7 +436,7 @@ func TestHandler_HandleNextEpochData(t *testing.T) {
}
func TestHandler_HandleNextConfigData(t *testing.T) {
- handler := newTestHandler(t, true, false)
+ handler := newTestHandler(t)
handler.Start()
defer handler.Stop()
diff --git a/dot/network/block_announce.go b/dot/network/block_announce.go
index 5bc5f4b31b..202c6cb235 100644
--- a/dot/network/block_announce.go
+++ b/dot/network/block_announce.go
@@ -203,8 +203,6 @@ func (s *Service) validateBlockAnnounceHandshake(from peer.ID, hs Handshake) err
data, ok := np.getInboundHandshakeData(from)
if ok {
data.handshake = hs
- // TODO: since this is used only for rpc system_peers only,
- // we can just set the inbound handshake and use that in Peers()
np.inboundHandshakeData.Store(from, data)
}
diff --git a/dot/network/connmgr.go b/dot/network/connmgr.go
index 3a726364f8..bb1806154f 100644
--- a/dot/network/connmgr.go
+++ b/dot/network/connmgr.go
@@ -156,8 +156,13 @@ func (cm *ConnManager) Connected(n network.Network, c network.Conn) {
cm.Lock()
defer cm.Unlock()
- // TODO: this should be updated to disconnect from (total_peers - maximum) peers, instead of just one peer
- if len(n.Peers()) > cm.max {
+ over := len(n.Peers()) - cm.max
+ if over <= 0 {
+ return
+ }
+
+ // if over the max peer count, disconnect from (total_peers - maximum) peers
+ for i := 0; i < over; i++ {
unprotPeers := cm.unprotectedPeers(n.Peers())
if len(unprotPeers) == 0 {
return
@@ -235,7 +240,8 @@ func (cm *ConnManager) Disconnected(n network.Network, c network.Conn) {
}
}()
- // TODO: if number of peers falls below the min desired peer count, we should try to connect to previously discovered peers
+ // TODO: if number of peers falls below the min desired peer count,
+ // we should try to connect to previously discovered peers (#1852)
}
// OpenedStream is called when a stream opened
diff --git a/dot/network/host.go b/dot/network/host.go
index 248388cd43..9e4f57d063 100644
--- a/dot/network/host.go
+++ b/dot/network/host.go
@@ -136,7 +136,7 @@ func newHost(ctx context.Context, cfg *Config) (*host, error) {
libp2p.NATPortMap(),
libp2p.Peerstore(ps),
libp2p.ConnectionManager(cm),
- libp2p.ChainOptions(libp2p.DefaultSecurity, libp2p.Security(secio.ID, secio.New)), // TODO: deprecate secio?
+ libp2p.ChainOptions(libp2p.DefaultSecurity, libp2p.Security(secio.ID, secio.New)), // TODO: deprecate secio (#1853)
libp2p.AddrsFactory(func(as []ma.Multiaddr) []ma.Multiaddr {
addrs := []ma.Multiaddr{}
for _, addr := range as {
diff --git a/dot/network/message.go b/dot/network/message.go
index 219bb87059..fc7e7f5064 100644
--- a/dot/network/message.go
+++ b/dot/network/message.go
@@ -165,7 +165,8 @@ func (bm *BlockRequestMessage) Decode(in []byte) error {
case *pb.BlockRequest_Hash:
startingBlock, err = variadic.NewUint64OrHash(common.BytesToHash(from.Hash))
case *pb.BlockRequest_Number:
- // TODO: we are receiving block requests w/ 4-byte From field; did the format change?
+ // TODO: we are receiving block requests w/ 4-byte From field; this should probably be
+ // 4-bytes as it represents a block number which is uint32 (#1854)
if len(from.Number) != 8 {
return errors.New("invalid BlockResponseMessage.From; uint64 is not 8 bytes")
}
diff --git a/dot/network/notifications.go b/dot/network/notifications.go
index 6aa5af7ba8..818b71dffc 100644
--- a/dot/network/notifications.go
+++ b/dot/network/notifications.go
@@ -334,7 +334,7 @@ func (s *Service) sendData(peer peer.ID, hs Handshake, info *notificationsProtoc
// TODO: ensure grandpa stores *all* previously received votes and discards them
// only when they are for already finalised rounds; currently this causes issues
// because a vote might be received slightly too early, causing a round mismatch err,
- // causing grandpa to discard the vote.
+ // causing grandpa to discard the vote. (#1855)
_, isConsensusMsg := msg.(*ConsensusMessage)
if !added && !isConsensusMsg {
return
diff --git a/dot/network/service.go b/dot/network/service.go
index 42a30af9f5..5e9fba5d41 100644
--- a/dot/network/service.go
+++ b/dot/network/service.go
@@ -364,7 +364,7 @@ func (s *Service) sentBlockIntervalTelemetry() {
&finalizedHash,
finalized.Number,
big.NewInt(int64(s.transactionHandler.TransactionsCount())),
- big.NewInt(0), // todo (ed) determine where to get used_state_cache_size
+ big.NewInt(0), // TODO: (ed) determine where to get used_state_cache_size (#1501)
))
if err != nil {
logger.Debug("problem sending system.interval telemetry message", "error", err)
@@ -374,7 +374,7 @@ func (s *Service) sentBlockIntervalTelemetry() {
}
func (*Service) handleConn(conn libp2pnetwork.Conn) {
- // TODO: update this for scoring
+ // TODO: update this for scoring (#1399)
}
// Stop closes running instances of the host and network services as well as
@@ -641,7 +641,7 @@ func (s *Service) handleLightMsg(stream libp2pnetwork.Stream, msg Message) error
return err
}
- // TODO(arijit): Remove once we implement the internal APIs. Added to increase code coverage.
+ // TODO(arijit): Remove once we implement the internal APIs. Added to increase code coverage. (#1856)
logger.Debug("LightResponse", "msg", resp.String())
err = s.host.writeToStream(stream, &resp)
@@ -729,13 +729,13 @@ func (s *Service) CollectGauge() map[string]int64 {
// HighestBlock returns the highest known block number
func (*Service) HighestBlock() int64 {
- // TODO: refactor this to get the data from the sync service
+ // TODO: refactor this to get the data from the sync service (#1857)
return 0
}
// StartingBlock return the starting block number that's currently being synced
func (*Service) StartingBlock() int64 {
- // TODO: refactor this to get the data from the sync service
+ // TODO: refactor this to get the data from the sync service (#1857)
return 0
}
diff --git a/dot/network/state.go b/dot/network/state.go
index 00388fbe4e..181107914f 100644
--- a/dot/network/state.go
+++ b/dot/network/state.go
@@ -43,7 +43,7 @@ type Syncer interface {
// If a request needs to be sent to the peer to retrieve the full block, this function will return it.
HandleBlockAnnounce(from peer.ID, msg *BlockAnnounceMessage) error
- // IsSynced exposes the internal synced state // TODO: use syncQueue for this
+ // IsSynced exposes the internal synced state
IsSynced() bool
// CreateBlockResponse is called upon receipt of a BlockRequestMessage to create the response
diff --git a/dot/network/stream_manager_test.go b/dot/network/stream_manager_test.go
index 4a70fd7223..92fd6e60ed 100644
--- a/dot/network/stream_manager_test.go
+++ b/dot/network/stream_manager_test.go
@@ -80,7 +80,7 @@ func TestStreamManager(t *testing.T) {
}
func TestStreamManager_KeepStream(t *testing.T) {
- t.Skip() // TODO: test is flaky
+ t.Skip() // TODO: test is flaky (#1026)
ctx, hosts, sms := setupStreamManagerTest(t)
ha, hb := hosts[0], hosts[1]
smA, smB := sms[0], sms[1]
diff --git a/dot/network/sync.go b/dot/network/sync.go
index 684b07ab71..9a0e677c61 100644
--- a/dot/network/sync.go
+++ b/dot/network/sync.go
@@ -25,7 +25,10 @@ import (
"github.com/libp2p/go-libp2p-core/peer"
)
-var maxBlockResponseSize uint64 = 1024 * 1024 * 4 // 4mb
+var (
+ maxBlockResponseSize uint64 = 1024 * 1024 * 4 // 4mb
+ blockRequestTimeout = time.Second * 5
+)
// DoBlockRequest sends a request to the given peer. If a response is received within a certain time period, it is returned, otherwise an error is returned.
func (s *Service) DoBlockRequest(to peer.ID, req *BlockRequestMessage) (*BlockResponseMessage, error) {
@@ -34,8 +37,7 @@ func (s *Service) DoBlockRequest(to peer.ID, req *BlockRequestMessage) (*BlockRe
s.host.h.ConnManager().Protect(to, "")
defer s.host.h.ConnManager().Unprotect(to, "")
- // TODO: make this a constant
- ctx, cancel := context.WithTimeout(s.ctx, time.Second*5)
+ ctx, cancel := context.WithTimeout(s.ctx, blockRequestTimeout)
defer cancel()
stream, err := s.host.h.NewStream(ctx, to, fullSyncID)
@@ -60,7 +62,7 @@ func (s *Service) receiveBlockResponse(stream libp2pnetwork.Stream) (*BlockRespo
// thus we should allocate buffers at startup and re-use them instead of allocating new ones each time.
//
// TODO: should we create another buffer pool for block response buffers?
- // for bootstrap this is ok since it's not parallelized, but will need to be updated for tip-mode
+ // for bootstrap this is ok since it's not parallelized, but will need to be updated for tip-mode (#1858)
s.blockResponseBufMu.Lock()
defer s.blockResponseBufMu.Unlock()
diff --git a/dot/network/test_helpers.go b/dot/network/test_helpers.go
index d9f50f170c..175ffa23bf 100644
--- a/dot/network/test_helpers.go
+++ b/dot/network/test_helpers.go
@@ -133,10 +133,7 @@ func (s *testStreamHandler) writeToStream(stream libp2pnetwork.Stream, msg Messa
}
func (s *testStreamHandler) readStream(stream libp2pnetwork.Stream, peer peer.ID, decoder messageDecoder, handler messageHandler) {
- var (
- maxMessageSize uint64 = maxBlockResponseSize // TODO: determine actual max message size
- msgBytes = make([]byte, maxMessageSize)
- )
+ msgBytes := make([]byte, maxBlockResponseSize)
defer func() {
s.exit = true
diff --git a/dot/network/utils.go b/dot/network/utils.go
index c7bcc80187..0636a27681 100644
--- a/dot/network/utils.go
+++ b/dot/network/utils.go
@@ -150,21 +150,25 @@ func uint64ToLEB128(in uint64) []byte {
return out
}
-func readLEB128ToUint64(r io.Reader, buf []byte) (uint64, error) {
+func readLEB128ToUint64(r io.Reader, buf []byte) (uint64, int, error) {
if len(buf) == 0 {
- return 0, errors.New("buffer has length 0")
+ return 0, 0, errors.New("buffer has length 0")
}
var out uint64
var shift uint
maxSize := 10 // Max bytes in LEB128 encoding of uint64 is 10.
+ bytesRead := 0
+
for {
- _, err := r.Read(buf[:1])
+ n, err := r.Read(buf[:1])
if err != nil {
- return 0, err
+ return 0, bytesRead, err
}
+ bytesRead += n
+
b := buf[0]
out |= uint64(0x7F&b) << shift
if b&0x80 == 0 {
@@ -173,12 +177,12 @@ func readLEB128ToUint64(r io.Reader, buf []byte) (uint64, error) {
maxSize--
if maxSize == 0 {
- return 0, fmt.Errorf("invalid LEB128 encoded data")
+ return 0, bytesRead, fmt.Errorf("invalid LEB128 encoded data")
}
shift += 7
}
- return out, nil
+ return out, bytesRead, nil
}
// readStream reads from the stream into the given buffer, returning the number of bytes read
@@ -191,9 +195,9 @@ func readStream(stream libp2pnetwork.Stream, buf []byte) (int, error) {
tot int
)
- length, err := readLEB128ToUint64(stream, buf[:1])
+ length, bytesRead, err := readLEB128ToUint64(stream, buf[:1])
if err != nil {
- return 0, fmt.Errorf("failed to read length: %w", err) // TODO: return bytes read from readLEB128ToUint64
+ return bytesRead, fmt.Errorf("failed to read length: %w", err)
}
if length == 0 {
diff --git a/dot/network/utils_test.go b/dot/network/utils_test.go
index ac2f5b3b16..5b09eb7dd1 100644
--- a/dot/network/utils_test.go
+++ b/dot/network/utils_test.go
@@ -115,7 +115,7 @@ func TestReadLEB128ToUint64(t *testing.T) {
_, err := buf.Write(tc.input)
require.NoError(t, err)
- ret, err := readLEB128ToUint64(buf, b[:1])
+ ret, _, err := readLEB128ToUint64(buf, b[:1])
require.NoError(t, err)
require.Equal(t, tc.output, ret)
}
@@ -129,6 +129,6 @@ func TestInvalidLeb128(t *testing.T) {
_, err := buf.Write(input)
require.NoError(t, err)
- _, err = readLEB128ToUint64(buf, b[:1])
+ _, _, err = readLEB128ToUint64(buf, b[:1])
require.Error(t, err)
}
diff --git a/dot/node.go b/dot/node.go
index 9b9b26c22a..a8e3118ad7 100644
--- a/dot/node.go
+++ b/dot/node.go
@@ -437,7 +437,7 @@ func loadRuntime(cfg *Config, ns *runtime.NodeStorage, stateSrvc *state.Service,
runtimeCode := make(map[string]runtime.Instance)
for i := range blocks {
hash := &blocks[i]
- code, err := stateSrvc.Storage.GetStorageByBlockHash(*hash, []byte(":code"))
+ code, err := stateSrvc.Storage.GetStorageByBlockHash(hash, []byte(":code"))
if err != nil {
return err
}
diff --git a/dot/node_test.go b/dot/node_test.go
index 855c75f239..7d4bbe8d77 100644
--- a/dot/node_test.go
+++ b/dot/node_test.go
@@ -17,7 +17,6 @@
package dot
import (
- "encoding/binary"
"math/big"
"reflect"
"sync"
@@ -113,7 +112,6 @@ func TestNewNode(t *testing.T) {
err = keystore.LoadKeystore("alice", ks.Babe)
require.NoError(t, err)
- // TODO: improve dot tests #687
cfg.Core.Roles = types.FullNodeRole
node, err := NewNode(cfg, ks, nil)
@@ -180,7 +178,6 @@ func TestStartNode(t *testing.T) {
err = keystore.LoadKeystore("alice", ks.Babe)
require.NoError(t, err)
- // TODO: improve dot tests #687
cfg.Core.Roles = types.FullNodeRole
node, err := NewNode(cfg, ks, nil)
@@ -314,6 +311,14 @@ func TestInitNode_LoadStorageRoot(t *testing.T) {
require.Equal(t, expectedRoot, stateRoot)
}
+// balanceKey returns the storage trie key for the balance of the account with the given public key
+func balanceKey(t *testing.T, key [32]byte) []byte {
+ accKey := append([]byte("balance:"), key[:]...)
+ hash, err := common.Blake2bHash(accKey)
+ require.NoError(t, err)
+ return hash[:]
+}
+
func TestInitNode_LoadBalances(t *testing.T) {
cfg := NewTestConfig(t)
require.NotNil(t, cfg)
@@ -355,13 +360,11 @@ func TestInitNode_LoadBalances(t *testing.T) {
kr, _ := keystore.NewSr25519Keyring()
alice := kr.Alice().Public().(*sr25519.PublicKey).AsBytes()
- bal, err := stateSrv.Storage.GetBalance(nil, alice)
+ bal, err := stateSrv.Storage.GetStorage(nil, balanceKey(t, alice))
require.NoError(t, err)
genbal := "0x0000000000000001"
- balbytes, _ := common.HexToBytes(genbal)
- expected := binary.LittleEndian.Uint64(balbytes)
-
+ expected, _ := common.HexToBytes(genbal)
require.Equal(t, expected, bal)
}
diff --git a/dot/rpc/modules/api.go b/dot/rpc/modules/api.go
index a62026146b..6ad000cb75 100644
--- a/dot/rpc/modules/api.go
+++ b/dot/rpc/modules/api.go
@@ -20,7 +20,7 @@ type StorageAPI interface {
GetStorage(root *common.Hash, key []byte) ([]byte, error)
GetStorageChild(root *common.Hash, keyToChild []byte) (*trie.Trie, error)
GetStorageFromChild(root *common.Hash, keyToChild, key []byte) ([]byte, error)
- GetStorageByBlockHash(bhash common.Hash, key []byte) ([]byte, error)
+ GetStorageByBlockHash(bhash *common.Hash, key []byte) ([]byte, error)
Entries(root *common.Hash) (map[string][]byte, error)
GetStateRootFromBlock(bhash *common.Hash) (*common.Hash, error)
GetKeysWithPrefix(root *common.Hash, prefix []byte) ([][]byte, error)
diff --git a/dot/rpc/modules/author.go b/dot/rpc/modules/author.go
index 023b9ad9cb..24831b6292 100644
--- a/dot/rpc/modules/author.go
+++ b/dot/rpc/modules/author.go
@@ -168,6 +168,7 @@ func (am *AuthorModule) InsertKey(r *http.Request, req *KeyInsertRequest, res *K
return err
}
+ // TODO: correctly use keystore type (#1850)
keyPair, err := keystore.DecodeKeyPairFromHex(keyBytes, keystore.DetermineKeyType(keyReq.Type))
if err != nil {
return err
diff --git a/dot/rpc/modules/chain_test.go b/dot/rpc/modules/chain_test.go
index 04fd5d559f..67795a9a6c 100644
--- a/dot/rpc/modules/chain_test.go
+++ b/dot/rpc/modules/chain_test.go
@@ -366,7 +366,7 @@ func newTestStateService(t *testing.T) *state.Service {
require.NoError(t, err)
}
- rt, err := wasmer.NewRuntimeFromGenesis(gen, rtCfg)
+ rt, err := wasmer.NewRuntimeFromGenesis(rtCfg)
require.NoError(t, err)
err = loadTestBlocks(t, genesisHeader.Hash(), stateSrvc.Block, rt)
diff --git a/dot/rpc/modules/grandpa.go b/dot/rpc/modules/grandpa.go
index 97efe84f93..53110e90c5 100644
--- a/dot/rpc/modules/grandpa.go
+++ b/dot/rpc/modules/grandpa.go
@@ -78,7 +78,7 @@ func (gm *GrandpaModule) ProveFinality(r *http.Request, req *ProveFinalityReques
// Leaving check in for linter
if req.authorityID != uint64(0) {
- // TODO: #1404 Check if functionality relevant
+ // TODO: Check if functionality relevant (#1404)
}
for _, block := range blocksToCheck {
diff --git a/dot/rpc/modules/grandpa_test.go b/dot/rpc/modules/grandpa_test.go
index 2233aaa65d..f17d65ad68 100644
--- a/dot/rpc/modules/grandpa_test.go
+++ b/dot/rpc/modules/grandpa_test.go
@@ -35,7 +35,7 @@ var kr, _ = keystore.NewEd25519Keyring()
func TestGrandpaProveFinality(t *testing.T) {
testStateService := newTestStateService(t)
- state.AddBlocksToState(t, testStateService.Block, 3)
+ state.AddBlocksToState(t, testStateService.Block, 3, false)
bestBlock, err := testStateService.Block.BestBlock()
if err != nil {
diff --git a/dot/rpc/modules/mocks/storage_api.go b/dot/rpc/modules/mocks/storage_api.go
index 1135575e89..d46456154d 100644
--- a/dot/rpc/modules/mocks/storage_api.go
+++ b/dot/rpc/modules/mocks/storage_api.go
@@ -109,11 +109,11 @@ func (_m *MockStorageAPI) GetStorage(root *common.Hash, key []byte) ([]byte, err
}
// GetStorageByBlockHash provides a mock function with given fields: bhash, key
-func (_m *MockStorageAPI) GetStorageByBlockHash(bhash common.Hash, key []byte) ([]byte, error) {
+func (_m *MockStorageAPI) GetStorageByBlockHash(bhash *common.Hash, key []byte) ([]byte, error) {
ret := _m.Called(bhash, key)
var r0 []byte
- if rf, ok := ret.Get(0).(func(common.Hash, []byte) []byte); ok {
+ if rf, ok := ret.Get(0).(func(*common.Hash, []byte) []byte); ok {
r0 = rf(bhash, key)
} else {
if ret.Get(0) != nil {
@@ -122,7 +122,7 @@ func (_m *MockStorageAPI) GetStorageByBlockHash(bhash common.Hash, key []byte) (
}
var r1 error
- if rf, ok := ret.Get(1).(func(common.Hash, []byte) error); ok {
+ if rf, ok := ret.Get(1).(func(*common.Hash, []byte) error); ok {
r1 = rf(bhash, key)
} else {
r1 = ret.Error(1)
diff --git a/dot/rpc/modules/state.go b/dot/rpc/modules/state.go
index c5443627ae..46d012f698 100644
--- a/dot/rpc/modules/state.go
+++ b/dot/rpc/modules/state.go
@@ -124,7 +124,6 @@ type StatePairResponse []interface{}
type StateStorageKeysResponse []string
// StateMetadataResponse holds the metadata
-//TODO: Determine actual type
type StateMetadataResponse string
//StateGetReadProofResponse holds the response format
@@ -174,7 +173,6 @@ func NewStateModule(net NetworkAPI, storage StorageAPI, core CoreAPI) *StateModu
// GetPairs returns the keys with prefix, leave empty to get all the keys.
func (sm *StateModule) GetPairs(_ *http.Request, req *StatePairRequest, res *StatePairResponse) error {
- // TODO implement change storage trie so that block hash parameter works (See issue #834)
var (
stateRootHash *common.Hash
err error
@@ -188,26 +186,37 @@ func (sm *StateModule) GetPairs(_ *http.Request, req *StatePairRequest, res *Sta
}
if req.Prefix == nil || *req.Prefix == "" || *req.Prefix == "0x" {
- pairs, err := sm.storageAPI.Entries(stateRootHash)
+ pairs, err := sm.storageAPI.Entries(stateRootHash) //nolint
if err != nil {
return err
}
+
for k, v := range pairs {
- *res = append(*res, []string{"0x" + hex.EncodeToString([]byte(k)), "0x" + hex.EncodeToString(v)})
+ *res = append(*res, []string{common.BytesToHex([]byte(k)), common.BytesToHex(v)})
}
- } else {
- // TODO this should return all keys with same prefix, currently only returning
- // matches. Implement when #837 is done.
- reqBytes, _ := common.HexToBytes(*req.Prefix)
- resI, err := sm.storageAPI.GetStorage(stateRootHash, reqBytes)
+
+ return nil
+ }
+
+ reqBytes, _ := common.HexToBytes(*req.Prefix)
+ keys, err := sm.storageAPI.GetKeysWithPrefix(stateRootHash, reqBytes)
+ if err != nil {
+ return err
+ }
+
+ if len(keys) == 0 {
+ *res = []interface{}{}
+ return nil
+ }
+
+ *res = make([]interface{}, len(keys))
+ for i, key := range keys {
+ val, err := sm.storageAPI.GetStorage(stateRootHash, key)
if err != nil {
return err
}
- if resI != nil {
- *res = append(*res, []string{"0x" + hex.EncodeToString(reqBytes), "0x" + hex.EncodeToString(resI)})
- } else {
- *res = []interface{}{}
- }
+
+ (*res)[i] = []string{common.BytesToHex(key), common.BytesToHex(val)}
}
return nil
@@ -248,7 +257,6 @@ func (sm *StateModule) GetKeysPaged(_ *http.Request, req *StateStorageKeyRequest
// GetMetadata calls runtime Metadata_metadata function
func (sm *StateModule) GetMetadata(_ *http.Request, req *StateRuntimeMetadataQuery, res *StateMetadataResponse) error {
- // TODO implement change storage trie so that block hash parameter works (See issue #834)
metadata, err := sm.coreAPI.GetMetadata(req.Bhash)
if err != nil {
return err
@@ -292,7 +300,6 @@ func (sm *StateModule) GetReadProof(_ *http.Request, req *StateGetReadProofReque
// GetRuntimeVersion Get the runtime version at a given block.
// If no block hash is provided, the latest version gets returned.
-// TODO currently only returns latest version, add functionality to lookup runtime by block hash (see issue #834)
func (sm *StateModule) GetRuntimeVersion(_ *http.Request, req *StateRuntimeVersionRequest, res *StateRuntimeVersionResponse) error {
rtVersion, err := sm.coreAPI.GetRuntimeVersion(req.Bhash)
if err != nil {
@@ -320,7 +327,7 @@ func (sm *StateModule) GetStorage(_ *http.Request, req *StateStorageRequest, res
reqBytes, _ := common.HexToBytes(req.Key) // no need to catch error here
if req.Bhash != nil {
- item, err = sm.storageAPI.GetStorageByBlockHash(*req.Bhash, reqBytes)
+ item, err = sm.storageAPI.GetStorageByBlockHash(req.Bhash, reqBytes)
if err != nil {
return err
}
@@ -340,7 +347,6 @@ func (sm *StateModule) GetStorage(_ *http.Request, req *StateStorageRequest, res
// GetStorageHash returns the hash of a storage entry at a block's state.
// If no block hash is provided, the latest value is returned.
-// TODO implement change storage trie so that block hash parameter works (See issue #834)
func (sm *StateModule) GetStorageHash(_ *http.Request, req *StateStorageHashRequest, res *StateStorageHashResponse) error {
var (
item []byte
@@ -350,7 +356,7 @@ func (sm *StateModule) GetStorageHash(_ *http.Request, req *StateStorageHashRequ
reqBytes, _ := common.HexToBytes(req.Key)
if req.Bhash != nil {
- item, err = sm.storageAPI.GetStorageByBlockHash(*req.Bhash, reqBytes)
+ item, err = sm.storageAPI.GetStorageByBlockHash(req.Bhash, reqBytes)
if err != nil {
return err
}
@@ -370,7 +376,6 @@ func (sm *StateModule) GetStorageHash(_ *http.Request, req *StateStorageHashRequ
// GetStorageSize returns the size of a storage entry at a block's state.
// If no block hash is provided, the latest value is used.
-// TODO implement change storage trie so that block hash parameter works (See issue #834)
func (sm *StateModule) GetStorageSize(_ *http.Request, req *StateStorageSizeRequest, res *StateStorageSizeResponse) error {
var (
item []byte
@@ -380,7 +385,7 @@ func (sm *StateModule) GetStorageSize(_ *http.Request, req *StateStorageSizeRequ
reqBytes, _ := common.HexToBytes(req.Key)
if req.Bhash != nil {
- item, err = sm.storageAPI.GetStorageByBlockHash(*req.Bhash, reqBytes)
+ item, err = sm.storageAPI.GetStorageByBlockHash(req.Bhash, reqBytes)
if err != nil {
return err
}
@@ -428,10 +433,9 @@ func (sm *StateModule) QueryStorage(_ *http.Request, req *StateStorageQueryRange
return nil
}
-// SubscribeRuntimeVersion isn't implemented properly yet.
-// TODO make this actually a subscription that pushes data
+// SubscribeRuntimeVersion initialises a runtime version subscription and returns the current version
+// See dot/rpc/subscription
func (sm *StateModule) SubscribeRuntimeVersion(r *http.Request, _ *StateStorageQueryRangeRequest, res *StateRuntimeVersionResponse) error {
- // TODO implement change storage trie so that block hash parameter works (See issue #834)
return sm.GetRuntimeVersion(r, nil, res)
}
diff --git a/dot/rpc/modules/state_test.go b/dot/rpc/modules/state_test.go
index 4f7ec7c30b..5e8c8b9220 100644
--- a/dot/rpc/modules/state_test.go
+++ b/dot/rpc/modules/state_test.go
@@ -374,7 +374,7 @@ func TestStateModule_QueryStorage(t *testing.T) {
}
func TestStateModule_GetMetadata(t *testing.T) {
- t.Skip() // TODO: update expected_metadata
+ t.Skip() // TODO: update expected_metadata (#1026)
sm, hash, _ := setupStateModule(t)
randomHash, err := common.HexToHash(RandomHash)
require.NoError(t, err)
diff --git a/dot/rpc/modules/system.go b/dot/rpc/modules/system.go
index de70e93af9..35cdabc064 100644
--- a/dot/rpc/modules/system.go
+++ b/dot/rpc/modules/system.go
@@ -82,7 +82,7 @@ type SyncStateResponse struct {
func NewSystemModule(net NetworkAPI, sys SystemAPI, core CoreAPI,
storage StorageAPI, txAPI TransactionStateAPI, blockAPI BlockAPI) *SystemModule {
return &SystemModule{
- networkAPI: net, // TODO: migrate to network state
+ networkAPI: net,
systemAPI: sys,
coreAPI: core,
storageAPI: storage,
diff --git a/dot/rpc/modules/system_test.go b/dot/rpc/modules/system_test.go
index 7239d317ca..0d726dd242 100644
--- a/dot/rpc/modules/system_test.go
+++ b/dot/rpc/modules/system_test.go
@@ -317,7 +317,6 @@ func setupSystemModule(t *testing.T) *SystemModule {
require.NoError(t, err)
core := newCoreService(t, chain)
- // TODO (ed) add transactions to txQueue and add test for those
txQueue := state.NewTransactionState()
return NewSystemModule(net, nil, core, chain.Storage, txQueue, nil)
}
diff --git a/dot/rpc/subscription/websocket.go b/dot/rpc/subscription/websocket.go
index 167a0a9ccf..f407ef195e 100644
--- a/dot/rpc/subscription/websocket.go
+++ b/dot/rpc/subscription/websocket.go
@@ -314,10 +314,10 @@ func (c *WSConn) initExtrinsicWatch(reqID float64, params interface{}) (Listener
c.safeSend(NewSubscriptionResponseJSON(esl.subID, reqID))
// TODO (ed) since HandleSubmittedExtrinsic has been called we assume the extrinsic is in the tx queue
- // should we add a channel to tx queue so we're notified when it's in the queue (See issue #1535)
+ // should we add a channel to tx queue so we're notified when it's in the queue (#1535)
c.safeSend(newSubscriptionResponse(authorExtrinsicUpdatesMethod, esl.subID, "ready"))
- // todo (ed) determine which peer extrinsic has been broadcast to, and set status
+ // TODO: determine which peer the extrinsic has been broadcast to, and set status (#1535)
return esl, err
}
diff --git a/dot/services.go b/dot/services.go
index 62b41bf2a6..c1f12dd409 100644
--- a/dot/services.go
+++ b/dot/services.go
@@ -197,8 +197,6 @@ func createBABEService(cfg *Config, st *state.Service, ks keystore.Keystore, cs
TransactionState: st.Transaction,
EpochState: st.Epoch,
BlockImportHandler: cs,
- EpochLength: cfg.Core.EpochLength,
- SlotDuration: cfg.Core.SlotDuration, // TODO: remove this, should only be modified via runtime constant
Authority: cfg.Core.BabeAuthority,
IsDev: cfg.Global.ID == "dev",
}
@@ -346,14 +344,13 @@ func createRPCService(cfg *Config, ns *runtime.NodeStorage, stateSrvc *state.Ser
return rpc.NewHTTPServer(rpcConfig)
}
-// System service
-// creates a service for providing system related information
+// createSystemService creates a system service that provides system-related information
func createSystemService(cfg *types.SystemInfo, stateSrvc *state.Service) (*system.Service, error) {
genesisData, err := stateSrvc.Base.LoadGenesisData()
if err != nil {
return nil, err
}
- // TODO: use data from genesisData for SystemInfo once they are in database (See issue #1248)
+
return system.NewService(cfg, genesisData), nil
}
diff --git a/dot/services_test.go b/dot/services_test.go
index 03d71432c3..624cb8f2c3 100644
--- a/dot/services_test.go
+++ b/dot/services_test.go
@@ -49,8 +49,6 @@ func TestCreateStateService(t *testing.T) {
stateSrvc, err := createStateService(cfg)
require.Nil(t, err)
-
- // TODO: improve dot tests #687
require.NotNil(t, stateSrvc)
}
@@ -64,7 +62,6 @@ func TestCreateCoreService(t *testing.T) {
defer utils.RemoveTestDir(t)
- // TODO: improve dot tests #687
cfg.Core.Roles = types.FullNodeRole
cfg.Core.BabeAuthority = false
cfg.Core.GrandpaAuthority = false
@@ -165,8 +162,6 @@ func TestCreateNetworkService(t *testing.T) {
networkSrvc, err := createNetworkService(cfg, stateSrvc)
require.NoError(t, err)
-
- // TODO: improve dot tests #687
require.NotNil(t, networkSrvc)
}
@@ -180,7 +175,6 @@ func TestCreateRPCService(t *testing.T) {
defer utils.RemoveTestDir(t)
- // TODO: improve dot tests #687
cfg.Core.Roles = types.FullNodeRole
cfg.Core.BabeAuthority = false
cfg.Core.GrandpaAuthority = false
@@ -225,7 +219,6 @@ func TestCreateBABEService(t *testing.T) {
defer utils.RemoveTestDir(t)
- // TODO: improve dot tests #687
cfg.Core.Roles = types.FullNodeRole
cfg.Init.Genesis = genFile.Name()
@@ -265,7 +258,6 @@ func TestCreateGrandpaService(t *testing.T) {
defer utils.RemoveTestDir(t)
- // TODO: improve dot tests #687
cfg.Core.Roles = types.AuthorityRole
cfg.Init.Genesis = genFile.Name()
diff --git a/dot/state/block.go b/dot/state/block.go
index 4865d1d582..a69b15a523 100644
--- a/dot/state/block.go
+++ b/dot/state/block.go
@@ -395,21 +395,19 @@ func (bs *BlockState) AddBlock(block *types.Block) error {
// AddBlockWithArrivalTime adds a block to the blocktree and the DB with the given arrival time
func (bs *BlockState) AddBlockWithArrivalTime(block *types.Block, arrivalTime time.Time) error {
- err := bs.setArrivalTime(block.Header.Hash(), arrivalTime)
- if err != nil {
+ // add block to blocktree
+ if err := bs.bt.AddBlock(&block.Header, uint64(arrivalTime.UnixNano())); err != nil {
return err
}
- prevHead := bs.bt.DeepestBlockHash()
-
- // add block to blocktree
- err = bs.bt.AddBlock(&block.Header, uint64(arrivalTime.UnixNano()))
- if err != nil {
+ if err := bs.setArrivalTime(block.Header.Hash(), arrivalTime); err != nil {
return err
}
+ prevHead := bs.bt.DeepestBlockHash()
+
// add the header to the DB
- err = bs.SetHeader(&block.Header)
+ err := bs.SetHeader(&block.Header)
if err != nil {
return err
}
@@ -469,7 +467,6 @@ func (bs *BlockState) handleAddedBlock(prev, curr common.Hash) error {
batch := bs.db.NewBatch()
for _, hash := range subchain {
- // TODO: set number from ancestor.Number + i ?
header, err := bs.GetHeader(hash)
if err != nil {
return fmt.Errorf("failed to get header in subchain: %w", err)
diff --git a/dot/state/block_notify_test.go b/dot/state/block_notify_test.go
index 9df42600a7..8f226d67f7 100644
--- a/dot/state/block_notify_test.go
+++ b/dot/state/block_notify_test.go
@@ -36,7 +36,7 @@ func TestImportChannel(t *testing.T) {
defer bs.FreeImportedBlockNotifierChannel(ch)
- AddBlocksToState(t, bs, 3)
+ AddBlocksToState(t, bs, 3, false)
for i := 0; i < 3; i++ {
select {
@@ -63,7 +63,7 @@ func TestFinalizedChannel(t *testing.T) {
defer bs.FreeFinalisedNotifierChannel(ch)
- chain, _ := AddBlocksToState(t, bs, 3)
+ chain, _ := AddBlocksToState(t, bs, 3, false)
for _, b := range chain {
bs.SetFinalisedHash(b.Hash(), 1, 0)
@@ -106,7 +106,7 @@ func TestImportChannel_Multi(t *testing.T) {
}
time.Sleep(time.Millisecond * 10)
- AddBlocksToState(t, bs, 1)
+ AddBlocksToState(t, bs, 1, false)
wg.Wait()
}
@@ -121,7 +121,7 @@ func TestFinalizedChannel_Multi(t *testing.T) {
chs[i] = bs.GetFinalisedNotifierChannel()
}
- chain, _ := AddBlocksToState(t, bs, 1)
+ chain, _ := AddBlocksToState(t, bs, 1, false)
var wg sync.WaitGroup
wg.Add(num)
diff --git a/dot/state/block_test.go b/dot/state/block_test.go
index f683b6ff6e..871ced5ae7 100644
--- a/dot/state/block_test.go
+++ b/dot/state/block_test.go
@@ -200,7 +200,7 @@ func TestGetSlotForBlock(t *testing.T) {
func TestIsBlockOnCurrentChain(t *testing.T) {
bs := newTestBlockState(t, testGenesisHeader)
- currChain, branchChains := AddBlocksToState(t, bs, 3)
+ currChain, branchChains := AddBlocksToState(t, bs, 3, false)
for _, header := range currChain {
onChain, err := bs.isBlockOnCurrentChain(header)
@@ -223,7 +223,7 @@ func TestIsBlockOnCurrentChain(t *testing.T) {
func TestAddBlock_BlockNumberToHash(t *testing.T) {
bs := newTestBlockState(t, testGenesisHeader)
- currChain, branchChains := AddBlocksToState(t, bs, 8)
+ currChain, branchChains := AddBlocksToState(t, bs, 8, false)
bestHash := bs.BestBlockHash()
bestHeader, err := bs.BestBlockHeader()
@@ -302,19 +302,17 @@ func TestFinalizedHash(t *testing.T) {
func TestFinalization_DeleteBlock(t *testing.T) {
bs := newTestBlockState(t, testGenesisHeader)
- AddBlocksToState(t, bs, 5)
+ AddBlocksToState(t, bs, 5, false)
btBefore := bs.bt.DeepCopy()
- t.Log(btBefore)
before := bs.bt.GetAllBlocks()
leaves := bs.Leaves()
- // TODO: why isn't arrival time set?
- // for _, n := range before {
- // has, err := bs.HasArrivalTime(n)
- // require.NoError(t, err)
- // require.True(t, has, n)
- // }
+ for _, n := range before {
+ has, err := bs.HasArrivalTime(n)
+ require.NoError(t, err)
+ require.True(t, has, n)
+ }
// pick block to finalise
fin := leaves[len(leaves)-1]
@@ -322,7 +320,6 @@ func TestFinalization_DeleteBlock(t *testing.T) {
require.NoError(t, err)
after := bs.bt.GetAllBlocks()
- t.Log(bs.bt)
isIn := func(arr []common.Hash, b common.Hash) bool {
for _, a := range arr {
@@ -399,6 +396,7 @@ func TestGetHashByNumber(t *testing.T) {
}
func TestAddBlock_WithReOrg(t *testing.T) {
+ t.Skip() // TODO: this should be fixed after state refactor PR
bs := newTestBlockState(t, testGenesisHeader)
header1a := &types.Header{
diff --git a/dot/state/initialize.go b/dot/state/initialize.go
index 79f3483e4a..4440e148b0 100644
--- a/dot/state/initialize.go
+++ b/dot/state/initialize.go
@@ -194,7 +194,7 @@ func (s *Service) CreateGenesisRuntime(t *trie.Trie, gen *genesis.Genesis) (runt
rtCfg.Storage = genTrie
rtCfg.LogLvl = s.logLvl
- r, err := wasmer.NewRuntimeFromGenesis(gen, rtCfg)
+ r, err := wasmer.NewRuntimeFromGenesis(rtCfg)
if err != nil {
return nil, fmt.Errorf("failed to create genesis runtime: %w", err)
}
diff --git a/dot/state/service.go b/dot/state/service.go
index 46998348db..5a42565534 100644
--- a/dot/state/service.go
+++ b/dot/state/service.go
@@ -236,7 +236,7 @@ func (s *Service) Rewind(toBlock int64) error {
// TODO: this is broken, it needs to set the latest finalised header after
// rewinding to some block number, but there is no reverse lookup function
- // for best block -> best finalised before that block
+ // for block -> (round, setID) where it was finalised (#1859)
err = s.Block.SetFinalisedHash(header.Hash(), 0, 0)
if err != nil {
return err
diff --git a/dot/state/service_test.go b/dot/state/service_test.go
index b7c995e293..01f8aa45bb 100644
--- a/dot/state/service_test.go
+++ b/dot/state/service_test.go
@@ -132,7 +132,7 @@ func TestService_BlockTree(t *testing.T) {
require.NoError(t, err)
// add blocks to state
- AddBlocksToState(t, stateA.Block, 10)
+ AddBlocksToState(t, stateA.Block, 10, false)
err = stateA.Stop()
require.NoError(t, err)
@@ -307,7 +307,7 @@ func TestService_Rewind(t *testing.T) {
err = serv.Grandpa.setSetIDChangeAtBlock(3, big.NewInt(10))
require.NoError(t, err)
- AddBlocksToState(t, serv.Block, 12)
+ AddBlocksToState(t, serv.Block, 12, false)
err = serv.Rewind(6)
require.NoError(t, err)
diff --git a/dot/state/storage.go b/dot/state/storage.go
index 96e9643ca7..fb282109f9 100644
--- a/dot/state/storage.go
+++ b/dot/state/storage.go
@@ -17,7 +17,6 @@
package state
import (
- "encoding/binary"
"errors"
"fmt"
"sync"
@@ -106,7 +105,7 @@ func (s *StorageState) StoreTrie(ts *rtstorage.TrieState, header *types.Header)
if s.syncing {
// keep only the trie at the head of the chain when syncing
- // TODO: probably remove this when memory usage improves
+ // TODO: probably remove this when memory usage improves (#1494)
s.tries.Range(func(k, _ interface{}) bool {
s.tries.Delete(k)
return true
@@ -241,21 +240,41 @@ func (s *StorageState) GetStorage(root *common.Hash, key []byte) ([]byte, error)
}
// GetStorageByBlockHash returns the value at the given key at the given block hash
-func (s *StorageState) GetStorageByBlockHash(bhash common.Hash, key []byte) ([]byte, error) {
- header, err := s.blockState.GetHeader(bhash)
- if err != nil {
- return nil, err
+func (s *StorageState) GetStorageByBlockHash(bhash *common.Hash, key []byte) ([]byte, error) {
+ var (
+ root common.Hash
+ err error
+ )
+
+ if bhash != nil {
+ header, err := s.blockState.GetHeader(*bhash) //nolint
+ if err != nil {
+ return nil, err
+ }
+
+ root = header.StateRoot
+ } else {
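+ // no block hash given; use the storage root of the best block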
+ root, err = s.StorageRoot()
+ if err != nil {
+ return nil, err
+ }
}
- return s.GetStorage(&header.StateRoot, key)
+ return s.GetStorage(&root, key)
}
// GetStateRootFromBlock returns the state root hash of a given block hash
func (s *StorageState) GetStateRootFromBlock(bhash *common.Hash) (*common.Hash, error) {
+ if bhash == nil {
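+ // default to the best block when no hash is provided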
+ b := s.blockState.BestBlockHash()
+ bhash = &b
+ }
+
header, err := s.blockState.GetHeader(*bhash)
if err != nil {
return nil, err
}
+
return &header.StateRoot, nil
}
@@ -264,12 +283,6 @@ func (s *StorageState) StorageRoot() (common.Hash, error) {
return s.blockState.BestBlockStateRoot()
}
-// EnumeratedTrieRoot not implemented
-func (*StorageState) EnumeratedTrieRoot(_ [][]byte) {
- //TODO
- panic("not implemented")
-}
-
// Entries returns Entries from the trie with the given state root
func (s *StorageState) Entries(root *common.Hash) (map[string][]byte, error) {
tr, err := s.loadTrie(root)
@@ -330,25 +343,6 @@ func (s *StorageState) GenerateTrieProof(stateRoot common.Hash, keys [][]byte) (
return trie.GenerateProof(stateRoot[:], keys, s.db)
}
-// GetBalance gets the balance for an account with the given public key
-func (s *StorageState) GetBalance(hash *common.Hash, key [32]byte) (uint64, error) {
- skey, err := common.BalanceKey(key)
- if err != nil {
- return 0, err
- }
-
- bal, err := s.GetStorage(hash, skey)
- if err != nil {
- return 0, err
- }
-
- if len(bal) != 8 {
- return 0, nil
- }
-
- return binary.LittleEndian.Uint64(bal), nil
-}
-
func (s *StorageState) pruneStorage(closeCh chan interface{}) {
for {
select {
diff --git a/dot/state/storage_notify.go b/dot/state/storage_notify.go
index 0d632c09ec..e9f3dbfbe1 100644
--- a/dot/state/storage_notify.go
+++ b/dot/state/storage_notify.go
@@ -93,7 +93,7 @@ func (s *StorageState) notifyObserver(root common.Hash, o Observer) error {
ent := t.TrieEntries()
for k, v := range ent {
if k != ":code" {
- // todo, currently we're ignoring :code since this is a lot of data
+ // currently we ignore :code since it is a lot of data
kv := &KeyValue{
Key: common.MustHexToBytes(fmt.Sprintf("0x%x", k)),
Value: v,
diff --git a/dot/state/storage_test.go b/dot/state/storage_test.go
index 3a78043bf9..c14fa5bbbd 100644
--- a/dot/state/storage_test.go
+++ b/dot/state/storage_test.go
@@ -71,7 +71,8 @@ func TestStorage_GetStorageByBlockHash(t *testing.T) {
err = storage.blockState.AddBlock(block)
require.NoError(t, err)
- res, err := storage.GetStorageByBlockHash(block.Header.Hash(), key)
+ hash := block.Header.Hash()
+ res, err := storage.GetStorageByBlockHash(&hash, key)
require.NoError(t, err)
require.Equal(t, value, res)
}
diff --git a/dot/state/test_helpers.go b/dot/state/test_helpers.go
index 64e8f87a60..3757d35d87 100644
--- a/dot/state/test_helpers.go
+++ b/dot/state/test_helpers.go
@@ -56,22 +56,21 @@ type testBranch struct {
depth int
}
-// AddBlocksToState adds blocks to a BlockState up to depth, with random branches
-func AddBlocksToState(t *testing.T, blockState *BlockState, depth int) ([]*types.Header, []*types.Header) {
- previousHash := blockState.BestBlockHash()
-
- branches := []testBranch{}
+// AddBlocksToState adds `depth` blocks to the BlockState, optionally with random branches
+func AddBlocksToState(t *testing.T, blockState *BlockState, depth int, withBranches bool) ([]*types.Header, []*types.Header) {
+ var (
+ currentChain, branchChains []*types.Header
+ branches []testBranch
+ )
arrivalTime := time.Now()
- currentChain := []*types.Header{}
- branchChains := []*types.Header{}
-
head, err := blockState.BestBlockHeader()
require.NoError(t, err)
+ previousHash := head.Hash()
// create base tree
startNum := int(head.Number.Int64())
- for i := startNum + 1; i <= depth; i++ {
+ for i := startNum + 1; i <= depth+startNum; i++ {
d := types.NewBabePrimaryPreDigest(0, uint64(i), [32]byte{}, [64]byte{})
digest := types.NewDigest()
_ = digest.Add(*d.ToPreRuntimeDigest())
@@ -90,7 +89,7 @@ func AddBlocksToState(t *testing.T, blockState *BlockState, depth int) ([]*types
hash := block.Header.Hash()
err := blockState.AddBlockWithArrivalTime(block, arrivalTime)
- require.Nil(t, err)
+ require.NoError(t, err)
previousHash = hash
@@ -106,6 +105,10 @@ func AddBlocksToState(t *testing.T, blockState *BlockState, depth int) ([]*types
arrivalTime = arrivalTime.Add(inc)
}
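+ // when branches aren't requested, return only the linear chain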
+ if !withBranches {
+ return currentChain, nil
+ }
+
// create tree branches
for _, branch := range branches {
previousHash = branch.hash
diff --git a/dot/sync/chain_processor.go b/dot/sync/chain_processor.go
index 3cca2876fa..9333a4e600 100644
--- a/dot/sync/chain_processor.go
+++ b/dot/sync/chain_processor.go
@@ -126,7 +126,7 @@ func (s *chainProcessor) processBlockData(bd *types.BlockData) error {
// TODO: fix this; sometimes when the node shuts down the "best block" isn't stored properly,
// so when the node restarts it has blocks higher than what it thinks is the best, causing it not to sync
// if we update the node to only store finalised blocks in the database, this should be fixed and the entire
- // code block can be removed
+ // code block can be removed (#1784)
block, err := s.blockState.GetBlockByHash(bd.Hash) //nolint
if err != nil {
logger.Debug("failed to get header", "hash", bd.Hash, "error", err)
@@ -150,7 +150,7 @@ func (s *chainProcessor) processBlockData(bd *types.BlockData) error {
// TODO: this is probably unnecessary, since the state is already in the database
// however, this case shouldn't be hit often, since it's only hit if the node state
- // is rewinded or if the node shuts down unexpectedly
+ // is rewound or if the node shuts down unexpectedly (#1784)
state, err := s.storageState.TrieState(&block.Header.StateRoot)
if err != nil {
logger.Warn("failed to load state for block", "block", block.Header.Hash(), "error", err)
diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go
index 617627ab0b..69a9e8063a 100644
--- a/dot/sync/chain_sync.go
+++ b/dot/sync/chain_sync.go
@@ -36,7 +36,7 @@ import (
const (
// maxWorkers is the maximum number of parallel sync workers
- // TODO: determine ideal value
+ // TODO: determine ideal value (#1659)
maxWorkers = 12
)
@@ -60,7 +60,7 @@ func (s chainSyncState) String() string {
}
}
-// TODO: determine ideal limit for pending blocks set
+// TODO: determine ideal limit for pending blocks set (#1659)
var pendingBlocksLimit = maxResponseSize * 32
// peerState tracks our peers's best reported blocks
@@ -223,7 +223,7 @@ func (cs *chainSync) setBlockAnnounce(from peer.ID, header *types.Header) error
return err
}
- // TODO: is it ok to assume if a node announces a block that it has it + its ancestors?
+ // TODO: is it ok to assume if a node announces a block that it has it + its ancestors? (#1659)
return cs.setPeerHead(from, header.Hash(), header.Number)
}
@@ -269,7 +269,7 @@ func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number *big.Int) e
// chain), and also the highest finalised block is higher than that number.
// thus the peer is on an invalid chain
if fin.Number.Cmp(ps.number) >= 0 {
- // TODO: downscore this peer, or temporarily don't sync from them?
+ // TODO: downscore this peer, or temporarily don't sync from them? (#1399)
// perhaps we need another field in `peerState` to mark whether the state is valid or not
return errPeerOnInvalidFork
}
@@ -314,7 +314,6 @@ func (cs *chainSync) logSyncSpeed() {
select {
case <-t.C:
- // TODO: why does this function not return when ctx is cancelled???
if cs.ctx.Err() != nil {
return
}
@@ -397,8 +396,6 @@ func (cs *chainSync) sync() {
// handle errors. in the case that a peer did not respond to us in time,
// temporarily add them to the ignore list.
- // TODO: periodically clear out ignore list, currently is done if (ignore list >= peer list)
-
switch {
case errors.Is(res.err.err, context.Canceled):
return
@@ -492,7 +489,7 @@ func (cs *chainSync) setMode(mode chainSyncState) {
// getTarget takes the average of all peer heads
// TODO: should we just return the highest? could be an attack vector potentially, if a peer reports some very large
// head block number, it would leave us in bootstrap mode forever
-// it would be better to have some sort of standard deviation calculation and discard any outliers
+// it would be better to have some sort of standard deviation calculation and discard any outliers (#1861)
func (cs *chainSync) getTarget() *big.Int {
count := int64(0)
sum := big.NewInt(0)
@@ -595,7 +592,7 @@ func (cs *chainSync) dispatchWorker(w *worker) {
}
for _, req := range reqs {
- // TODO: if we find a good peer, do sync with them, right now it re-selects a peer each time
+ // TODO: if we find a good peer, do sync with them, right now it re-selects a peer each time (#1399)
if err := cs.doSync(req); err != nil {
// failed to sync, set worker error and put into result queue
w.err = err
@@ -629,7 +626,7 @@ func (cs *chainSync) doSync(req *network.BlockRequestMessage) *workerError {
// send out request and potentially receive response, error if timeout
logger.Trace("sending out block request", "request", req)
- // TODO: use scoring to determine what peer to try to sync from first
+ // TODO: use scoring to determine what peer to try to sync from first (#1399)
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(peers))))
who := peers[idx.Int64()]
resp, err := cs.network.DoBlockRequest(who, req)
@@ -904,7 +901,7 @@ func workerToRequests(w *worker) ([]*network.BlockRequestMessage, error) {
var end *common.Hash
if !w.targetHash.Equal(common.EmptyHash) {
- end = &w.targetHash // TODO: change worker targetHash to ptr?
+ end = &w.targetHash
}
reqs[i] = &network.BlockRequestMessage{
diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go
index c14f7b6f91..2381ff3b48 100644
--- a/dot/sync/syncer_test.go
+++ b/dot/sync/syncer_test.go
@@ -121,7 +121,7 @@ func newTestSyncer(t *testing.T) *Service {
rtCfg.CodeHash, err = cfg.StorageState.LoadCodeHash(nil)
require.NoError(t, err)
- instance, err := wasmer.NewRuntimeFromGenesis(gen, rtCfg)
+ instance, err := wasmer.NewRuntimeFromGenesis(rtCfg)
require.NoError(t, err)
cfg.BlockState.StoreRuntime(cfg.BlockState.BestBlockHash(), instance)
diff --git a/dot/sync/test_helpers.go b/dot/sync/test_helpers.go
index 10002398bb..0b165a568e 100644
--- a/dot/sync/test_helpers.go
+++ b/dot/sync/test_helpers.go
@@ -73,8 +73,9 @@ func BuildBlock(t *testing.T, instance runtime.Instance, parent *types.Header, e
require.NoError(t, err)
vtx := transaction.NewValidTransaction(ext, txn)
- _, err = instance.ApplyExtrinsic(ext) // TODO: Determine error for ret
+ ret, err := instance.ApplyExtrinsic(ext) //nolint
require.NoError(t, err)
+ require.Equal(t, []byte{0, 0}, ret)
body, err = babe.ExtrinsicsToBody(inExt, []*transaction.ValidTransaction{vtx})
require.NoError(t, err)
diff --git a/dot/sync/tip_syncer.go b/dot/sync/tip_syncer.go
index 79bde3c011..45987f56b1 100644
--- a/dot/sync/tip_syncer.go
+++ b/dot/sync/tip_syncer.go
@@ -176,7 +176,7 @@ func (s *tipSyncer) handleTick() ([]*worker, error) {
for _, block := range s.pendingBlocks.getBlocks() {
if block.number.Cmp(fin.Number) <= 0 {
- // TODO: delete from pending set (this should not happen, it should have already been deleted)
+ // delete from pending set (this should not happen, it should have already been deleted)
s.pendingBlocks.removeBlock(block.hash)
continue
}
diff --git a/dot/telemetry/telemetry.go b/dot/telemetry/telemetry.go
index 873ed4273a..2b9fd4081f 100644
--- a/dot/telemetry/telemetry.go
+++ b/dot/telemetry/telemetry.go
@@ -96,7 +96,7 @@ func (h *Handler) AddConnections(conns []*genesis.TelemetryEndpoint) {
for _, v := range conns {
c, _, err := websocket.DefaultDialer.Dial(v.Endpoint, nil)
if err != nil {
- // todo (ed) try reconnecting if there is an error connecting
+ // TODO: try reconnecting if there is an error connecting (#1862)
h.log.Debug("issue adding telemetry connection", "error", err)
continue
}
diff --git a/dot/types/babe.go b/dot/types/babe.go
index 176b5e25bc..9a6d81de79 100644
--- a/dot/types/babe.go
+++ b/dot/types/babe.go
@@ -95,7 +95,7 @@ func (d *EpochDataRaw) ToEpochData() (*EpochData, error) {
type ConfigData struct {
C1 uint64
C2 uint64
- SecondarySlots byte // TODO: this is unused, will need to update BABE verifier to use this
+ SecondarySlots byte // TODO: this is unused, will need to update BABE verifier to use this (#1863)
}
// GetSlotFromHeader returns the BABE slot from the given header
diff --git a/dot/types/extrinsic.go b/dot/types/extrinsic.go
index d575766fde..89c51acfca 100644
--- a/dot/types/extrinsic.go
+++ b/dot/types/extrinsic.go
@@ -27,31 +27,11 @@ import (
// Extrinsic is a generic transaction whose format is verified in the runtime
type Extrinsic []byte
-// ExtrinsicData is a transaction which embeds the `ctypes.Extrinsic` and has additional functionality.
-type ExtrinsicData struct {
- ctypes.Extrinsic
-}
-
// NewExtrinsic creates a new Extrinsic given a byte slice
func NewExtrinsic(e []byte) Extrinsic {
return Extrinsic(e)
}
-// DecodeVersion decodes only the version field of the Extrinsic.
-func (e *ExtrinsicData) DecodeVersion(encExt Extrinsic) error {
- decoder := scale.NewDecoder(bytes.NewReader(encExt))
- _, err := decoder.DecodeUintCompact()
- if err != nil {
- return err
- }
-
- err = decoder.Decode(&e.Version)
- if err != nil {
- return err
- }
- return nil
-}
-
// Hash returns the blake2b hash of the extrinsic
func (e Extrinsic) Hash() common.Hash {
hash, err := common.Blake2bHash(e)
@@ -79,3 +59,19 @@ func BytesArrayToExtrinsics(b [][]byte) []Extrinsic {
}
return exts
}
+
+// ExtrinsicData is a transaction which embeds the `ctypes.Extrinsic` and has additional functionality.
+type ExtrinsicData struct {
+ ctypes.Extrinsic
+}
+
+// DecodeVersion decodes only the version field of the Extrinsic.
+func (e *ExtrinsicData) DecodeVersion(encExt Extrinsic) error {
+ decoder := scale.NewDecoder(bytes.NewReader(encExt))
+ _, err := decoder.DecodeUintCompact()
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(&e.Version)
+}
diff --git a/dot/utils.go b/dot/utils.go
index 4f67021322..5a7eeca688 100644
--- a/dot/utils.go
+++ b/dot/utils.go
@@ -121,14 +121,13 @@ func NewTestGenesisFile(t *testing.T, cfg *Config) *os.File {
// NewTestGenesisAndRuntime create a new test runtime and a new test genesis
// file with the test runtime stored in raw data and returns the genesis file
-// nolint
func NewTestGenesisAndRuntime(t *testing.T) string {
dir := utils.NewTestDir(t)
_ = wasmer.NewTestInstance(t, runtime.NODE_RUNTIME)
runtimeFilePath := runtime.GetAbsolutePath(runtime.NODE_RUNTIME_FP)
- runtimeData, err := ioutil.ReadFile(runtimeFilePath)
+ runtimeData, err := ioutil.ReadFile(filepath.Clean(runtimeFilePath))
require.Nil(t, err)
gen := NewTestGenesis(t)
@@ -157,8 +156,6 @@ func NewTestGenesisAndRuntime(t *testing.T) string {
func NewTestConfig(t *testing.T) *Config {
dir := utils.NewTestDir(t)
- // TODO: use default config instead of gssmr config for test config #776
-
cfg := &Config{
Global: GlobalConfig{
Name: GssmrConfig().Global.Name,
@@ -230,7 +227,7 @@ func WriteConfig(data []byte, fp string) *os.File {
return newFile
}
-// CreateJSONRawFile will generate an Json File
+// CreateJSONRawFile will generate a JSON genesis file with raw storage
func CreateJSONRawFile(bs *BuildSpec, fp string) *os.File {
data, err := bs.ToJSONRaw()
if err != nil {
@@ -240,8 +237,7 @@ func CreateJSONRawFile(bs *BuildSpec, fp string) *os.File {
return WriteConfig(data, fp)
}
-// RandomNodeName generate a new random name
-// if there is no name configured to the node
+// RandomNodeName generates a new random name if there is no name configured for the node
func RandomNodeName() string {
entropy, _ := bip39.NewEntropy(128)
randomNamesString, _ := bip39.NewMnemonic(entropy)
diff --git a/dot/utils_test.go b/dot/utils_test.go
index 940440bfd9..63be0d3dd3 100644
--- a/dot/utils_test.go
+++ b/dot/utils_test.go
@@ -24,7 +24,6 @@ import (
"time"
"github.com/ChainSafe/gossamer/lib/genesis"
- "github.com/ChainSafe/gossamer/lib/runtime/wasmer"
"github.com/ChainSafe/gossamer/lib/trie"
"github.com/ChainSafe/gossamer/lib/utils"
"github.com/stretchr/testify/require"
@@ -33,20 +32,14 @@ import (
// TestNewConfig tests the NewTestConfig method
func TestNewConfig(t *testing.T) {
cfg := NewTestConfig(t)
-
defer utils.RemoveTestDir(t)
-
- // TODO: improve dot tests #687
require.NotNil(t, cfg)
}
// TestNewConfigAndFile tests the NewTestConfigWithFile method
func TestNewConfigAndFile(t *testing.T) {
testCfg, testCfgFile := NewTestConfigWithFile(t)
-
defer utils.RemoveTestDir(t)
-
- // TODO: improve dot tests #687
require.NotNil(t, testCfg)
require.NotNil(t, testCfgFile)
}
@@ -85,12 +78,6 @@ func TestNewTestGenesisFile(t *testing.T) {
require.Equal(t, genRaw.Genesis.Raw["top"], genHR.Genesis.Raw["top"])
}
-func TestNewRuntimeFromGenesis(t *testing.T) {
- gen := NewTestGenesis(t)
- _, err := wasmer.NewRuntimeFromGenesis(gen, &wasmer.Config{})
- require.NoError(t, err)
-}
-
func TestDeepCopyVsSnapshot(t *testing.T) {
cfg := NewTestConfig(t)
require.NotNil(t, cfg)
diff --git a/lib/babe/babe.go b/lib/babe/babe.go
index ab5c8331ad..4a4171d095 100644
--- a/lib/babe/babe.go
+++ b/lib/babe/babe.go
@@ -54,7 +54,7 @@ type Service struct {
blockImportHandler BlockImportHandler
// BABE authority keypair
- keypair *sr25519.Keypair // TODO: change to BABE keystore
+ keypair *sr25519.Keypair // TODO: change to BABE keystore (#1864)
// Epoch configuration data
slotDuration time.Duration
@@ -68,21 +68,17 @@ type Service struct {
// ServiceConfig represents a BABE configuration
type ServiceConfig struct {
- LogLvl log.Lvl
- BlockState BlockState
- StorageState StorageState
- TransactionState TransactionState
- EpochState EpochState
- BlockImportHandler BlockImportHandler
- Keypair *sr25519.Keypair
- Runtime runtime.Instance
- AuthData []types.Authority
- IsDev bool
- ThresholdNumerator uint64 // for development purposes
- ThresholdDenominator uint64 // for development purposes
- SlotDuration uint64 // for development purposes; in milliseconds
- EpochLength uint64 // for development purposes; in slots
- Authority bool
+ LogLvl log.Lvl
+ BlockState BlockState
+ StorageState StorageState
+ TransactionState TransactionState
+ EpochState EpochState
+ BlockImportHandler BlockImportHandler
+ Keypair *sr25519.Keypair
+ Runtime runtime.Instance
+ AuthData []types.Authority
+ IsDev bool
+ Authority bool
}
// NewService returns a new Babe Service using the provided VRF keys and runtime
@@ -116,7 +112,6 @@ func NewService(cfg *ServiceConfig) (*Service, error) {
blockState: cfg.BlockState,
storageState: cfg.StorageState,
epochState: cfg.EpochState,
- epochLength: cfg.EpochLength,
keypair: cfg.Keypair,
transactionState: cfg.TransactionState,
slotToProof: make(map[uint64]*VrfOutputAndProof),
@@ -161,54 +156,23 @@ func (b *Service) setupParameters(cfg *ServiceConfig) error {
}
b.epochData.randomness = epochData.Randomness
-
- configData, err := b.epochState.GetLatestConfigData()
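+ // slot duration, epoch length and threshold are read from the epoch state rather than the service config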
+ b.epochData.authorities = epochData.Authorities
+ b.slotDuration, err = b.epochState.GetSlotDuration()
if err != nil {
return err
}
- // if slot duration is set via the config file, overwrite the runtime value
- switch {
- case cfg.SlotDuration > 0 && cfg.IsDev: // TODO: remove this, needs to be set via runtime
- b.slotDuration, err = time.ParseDuration(fmt.Sprintf("%dms", cfg.SlotDuration))
- case cfg.SlotDuration > 0 && !cfg.IsDev:
- err = errors.New("slot duration modified in config for non-dev chain")
- default:
- b.slotDuration, err = b.epochState.GetSlotDuration()
- }
+ b.epochLength, err = b.epochState.GetEpochLength()
if err != nil {
return err
}
- switch {
- case cfg.EpochLength != 0 && cfg.IsDev: // TODO: remove this, needs to be set via runtime
- b.epochLength = cfg.EpochLength
- case cfg.EpochLength > 0 && !cfg.IsDev:
- err = errors.New("epoch length modified in config for non-dev chain")
- default:
- b.epochLength, err = b.epochState.GetEpochLength()
- }
+ configData, err := b.epochState.GetLatestConfigData()
if err != nil {
return err
}
- switch {
- case cfg.AuthData != nil && cfg.IsDev: // TODO: remove this, needs to be set via runtime
- b.epochData.authorities = cfg.AuthData
- case cfg.AuthData != nil && !cfg.IsDev:
- return errors.New("authority data modified in config for non-dev chain")
- default:
- b.epochData.authorities = epochData.Authorities
- }
-
- switch {
- case cfg.ThresholdDenominator != 0 && cfg.IsDev: // TODO: remove this, needs to be set via runtime
- b.epochData.threshold, err = CalculateThreshold(cfg.ThresholdNumerator, cfg.ThresholdDenominator, len(b.epochData.authorities))
- case cfg.ThresholdDenominator != 0 && !cfg.IsDev:
- err = errors.New("threshold modified in config for non-dev chain")
- default:
- b.epochData.threshold, err = CalculateThreshold(configData.C1, configData.C2, len(b.epochData.authorities))
- }
+ b.epochData.threshold, err = CalculateThreshold(configData.C1, configData.C2, len(b.epochData.authorities))
if err != nil {
return err
}
diff --git a/lib/babe/babe_test.go b/lib/babe/babe_test.go
index 1104bc28af..751c2bd395 100644
--- a/lib/babe/babe_test.go
+++ b/lib/babe/babe_test.go
@@ -32,11 +32,13 @@ import (
"github.com/ChainSafe/gossamer/lib/common"
"github.com/ChainSafe/gossamer/lib/crypto/sr25519"
"github.com/ChainSafe/gossamer/lib/genesis"
+ "github.com/ChainSafe/gossamer/lib/keystore"
"github.com/ChainSafe/gossamer/lib/runtime"
rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage"
"github.com/ChainSafe/gossamer/lib/runtime/wasmer"
"github.com/ChainSafe/gossamer/lib/trie"
"github.com/ChainSafe/gossamer/lib/utils"
+
log "github.com/ChainSafe/log15"
mock "github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
@@ -47,6 +49,8 @@ var (
emptyHash = trie.EmptyHash
testEpochIndex = uint64(0)
+ keyring, _ = keystore.NewSr25519Keyring()
+
maxThreshold = common.MaxUint128
minThreshold = &common.Uint128{}
@@ -70,7 +74,7 @@ var (
func createTestService(t *testing.T, cfg *ServiceConfig) *Service {
wasmer.DefaultTestLogLvl = 1
- gen, genTrie, genHeader := genesis.NewTestGenesisWithTrieAndHeader(t)
+ gen, genTrie, genHeader := genesis.NewDevGenesisWithTrieAndHeader(t)
genesisHeader = genHeader
var err error
@@ -85,8 +89,7 @@ func createTestService(t *testing.T, cfg *ServiceConfig) *Service {
cfg.BlockImportHandler.(*mocks.BlockImportHandler).On("HandleBlockProduced", mock.AnythingOfType("*types.Block"), mock.AnythingOfType("*storage.TrieState")).Return(nil)
if cfg.Keypair == nil {
- cfg.Keypair, err = sr25519.GenerateKeypair()
- require.NoError(t, err)
+ cfg.Keypair = keyring.Alice().(*sr25519.Keypair)
}
if cfg.AuthData == nil {
@@ -102,11 +105,10 @@ func createTestService(t *testing.T, cfg *ServiceConfig) *Service {
}
testDatadirPath, err := ioutil.TempDir("/tmp", "test-datadir-*") //nolint
+ require.NoError(t, err)
var dbSrv *state.Service
if cfg.BlockState == nil || cfg.StorageState == nil || cfg.EpochState == nil {
- require.NoError(t, err)
-
config := state.Config{
Path: testDatadirPath,
LogLevel: log.LvlInfo,
@@ -114,10 +116,6 @@ func createTestService(t *testing.T, cfg *ServiceConfig) *Service {
dbSrv = state.NewService(config)
dbSrv.UseMemDB()
- if cfg.EpochLength > 0 {
- genesisBABEConfig.EpochLength = cfg.EpochLength
- }
-
err = dbSrv.Initialise(gen, genHeader, genTrie)
require.NoError(t, err)
@@ -153,7 +151,7 @@ func createTestService(t *testing.T, cfg *ServiceConfig) *Service {
rtCfg.NodeStorage = nodeStorage
- cfg.Runtime, err = wasmer.NewRuntimeFromGenesis(gen, rtCfg)
+ cfg.Runtime, err = wasmer.NewRuntimeFromGenesis(rtCfg)
require.NoError(t, err)
}
cfg.BlockState.StoreRuntime(cfg.BlockState.BestBlockHash(), cfg.Runtime)
@@ -209,7 +207,7 @@ func newTestServiceSetupParameters(t *testing.T) (*Service, *state.EpochState, *
rtCfg := &wasmer.Config{}
rtCfg.Storage, err = rtstorage.NewTrieState(genTrie)
require.NoError(t, err)
- rt, err := wasmer.NewRuntimeFromGenesis(gen, rtCfg) //nolint
+ rt, err := wasmer.NewRuntimeFromGenesis(rtCfg)
require.NoError(t, err)
genCfg, err := rt.BabeConfiguration()
@@ -311,15 +309,6 @@ func TestService_setupParameters_configData(t *testing.T) {
require.Equal(t, threshold, s.epochData.threshold)
}
-func TestService_RunEpochLengthConfig(t *testing.T) {
- cfg := &ServiceConfig{
- EpochLength: 5,
- }
-
- babeService := createTestService(t, cfg)
- require.Equal(t, uint64(5), babeService.epochLength)
-}
-
func TestService_SlotDuration(t *testing.T) {
duration, err := time.ParseDuration("1000ms")
require.NoError(t, err)
diff --git a/lib/babe/build_test.go b/lib/babe/build_test.go
index ed0fef874f..2e52b1bc5c 100644
--- a/lib/babe/build_test.go
+++ b/lib/babe/build_test.go
@@ -127,7 +127,6 @@ func createTestExtrinsic(t *testing.T, rt runtime.Instance, genHash common.Hash,
func createTestBlock(t *testing.T, babeService *Service, parent *types.Header, exts [][]byte, slotNumber, epoch uint64) (*types.Block, Slot) { //nolint
// create proof that we can authorize this block
babeService.epochData.authorityIndex = 0
-
addAuthorshipProof(t, babeService, slotNumber, epoch)
for _, ext := range exts {
@@ -148,17 +147,9 @@ func createTestBlock(t *testing.T, babeService *Service, parent *types.Header, e
require.NoError(t, err)
// build block
- var block *types.Block
- for i := 0; i < 1; i++ { // retry if error
- block, err = babeService.buildBlock(parent, slot, rt)
- if err == nil {
- babeService.blockState.StoreRuntime(block.Header.Hash(), rt)
- return block, slot
- }
- }
-
+ block, err := babeService.buildBlock(parent, slot, rt)
require.NoError(t, err)
-
+ babeService.blockState.StoreRuntime(block.Header.Hash(), rt)
return block, slot
}
@@ -179,10 +170,12 @@ func TestBuildBlock_ok(t *testing.T) {
babeService.epochData.authorityIndex,
)
- // TODO: re-add extrinsic
- exts := [][]byte{}
+ parentHash := babeService.blockState.GenesisHash()
+ rt, err := babeService.blockState.GetRuntime(nil)
+ require.NoError(t, err)
- block, slot := createTestBlock(t, babeService, emptyHeader, exts, 1, testEpochIndex)
+ ext := createTestExtrinsic(t, rt, parentHash, 0)
+ block, slot := createTestBlock(t, babeService, emptyHeader, [][]byte{ext}, 1, testEpochIndex)
// create pre-digest
preDigest, err := builder.buildBlockPreDigest(slot)
diff --git a/lib/babe/epoch.go b/lib/babe/epoch.go
index 12605390e0..4c311aa09c 100644
--- a/lib/babe/epoch.go
+++ b/lib/babe/epoch.go
@@ -17,7 +17,6 @@
package babe
import (
- "errors"
"fmt"
)
@@ -53,7 +52,7 @@ func (b *Service) initiateEpoch(epoch uint64) error {
}
idx, err := b.getAuthorityIndex(data.Authorities)
- if err != nil && !errors.Is(err, ErrNotAuthority) { // TODO: this should be checked in the upper function
+ if err != nil {
return err
}
diff --git a/lib/babe/epoch_test.go b/lib/babe/epoch_test.go
index 45cb9313e8..e63b6d0a20 100644
--- a/lib/babe/epoch_test.go
+++ b/lib/babe/epoch_test.go
@@ -57,7 +57,7 @@ func TestInitiateEpoch_Epoch1(t *testing.T) {
err := bs.initiateEpoch(0)
require.NoError(t, err)
- state.AddBlocksToState(t, bs.blockState.(*state.BlockState), 1)
+ state.AddBlocksToState(t, bs.blockState.(*state.BlockState), 1, false)
// epoch 1, check that genesis EpochData and ConfigData was properly set
threshold := bs.epochData.threshold
diff --git a/lib/babe/median.go b/lib/babe/median.go
deleted file mode 100644
index 5a1332d59b..0000000000
--- a/lib/babe/median.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2019 ChainSafe Systems (ON) Corp.
-// This file is part of gossamer.
-//
-// The gossamer library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The gossamer library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
- // along with the gossamer library. If not, see <http://www.gnu.org/licenses/>.
-
-package babe
-
-import (
- "errors"
- "fmt"
- "math/big"
- "sort"
- "time"
-)
-
-// slotTail is the number of blocks needed for us to run the median algorithm. in the spec, it's arbitrarily set to 1200.
-// TODO: will need to update this once finished simple slot time algo testing
-var slotTail = uint64(12)
-
-// returns the estimated current slot number, without median algorithm
-func (b *Service) estimateCurrentSlot() (uint64, error) {
- // estimate slot of highest block we've received
- head := b.blockState.BestBlockHash()
-
- slot, err := b.blockState.GetSlotForBlock(head)
- if err != nil {
- return 0, fmt.Errorf("cannot get slot for head of chain: %s", err)
- }
-
- // get arrival time of chain head in unix nanoseconds
- // note: this assumes that the block arrived within the slot it was produced, may be off
- arrivalTime, err := b.blockState.GetArrivalTime(head)
- if err != nil {
- return 0, fmt.Errorf("cannot get arrival time for head of chain: %s", err)
- }
-
- // use slot duration to count up
- for {
- if time.Since(arrivalTime) <= b.getSlotDuration() {
- return slot, nil
- }
-
- // increment slot, slot time
- arrivalTime = arrivalTime.Add(b.slotDuration)
- slot++
- }
-}
-
-// getCurrentSlot estimates the current slot, then uses the slotTime algorithm to determine the exact slot
-func (b *Service) getCurrentSlot() (uint64, error) {
- estimate, err := b.estimateCurrentSlot()
- if err != nil {
- return 0, err
- }
-
- for {
- slotTime, err := b.slotTime(estimate, slotTail)
- if err != nil {
- return 0, err
- }
-
- st := time.Unix(int64(slotTime), 0)
-
- if time.Since(st) <= b.getSlotDuration() {
- return estimate, nil
- }
-
- estimate++
- }
-}
-
-// slotTime calculates the slot time in the form of seconds since the unix epoch
-// for a given slot in seconds, returns 0 and an error if it can't be calculated
-func (b *Service) slotTime(slot, slotTail uint64) (uint64, error) {
- var at []uint64
-
- head := b.blockState.BestBlockHash()
- tail := new(big.Int).SetUint64(slotTail)
-
- deepestBlock, err := b.blockState.GetHeader(head)
- if err != nil {
- return 0, fmt.Errorf("cannot get deepest block: %s", err)
- }
-
- // check to make sure we have enough blocks before the deepest block to accurately calculate slot time
- if deepestBlock.Number.Cmp(tail) == -1 {
- return 0, fmt.Errorf("cannot calculate slot time: deepest block number %d less than or equal to slot tail %d", deepestBlock.Number, tail)
- }
-
- startNumber := tail.Sub(deepestBlock.Number, tail)
-
- start, err := b.blockState.GetBlockByNumber(startNumber)
- if err != nil {
- return 0, err
- }
-
- sd := uint64(b.getSlotDuration().Seconds())
-
- var currSlot uint64
- var so uint64
- var arrivalTime time.Time
-
- subchain, err := b.blockState.SubChain(start.Header.Hash(), deepestBlock.Hash())
- if err != nil {
- return 0, err
- }
-
- for _, hash := range subchain {
- currSlot, err = b.blockState.GetSlotForBlock(hash)
- if err != nil {
- return 0, err
- }
-
- so, err = slotOffset(currSlot, slot)
- if err != nil {
- return 0, err
- }
-
- arrivalTime, err = b.blockState.GetArrivalTime(hash)
- if err != nil {
- return 0, err
- }
-
- st := uint64(arrivalTime.Unix()) + (so * sd)
- at = append(at, st)
- }
-
- st, err := median(at)
- if err != nil {
- return 0, err
- }
- return st, nil
-}
-
-// median calculates the median of a uint64 slice
-// @TODO: Implement quickselect as an alternative to this.
-func median(l []uint64) (uint64, error) {
- // sort the list
- sort.Slice(l, func(i, j int) bool { return l[i] < l[j] })
-
- m := len(l)
- med := uint64(0)
- if m == 0 {
- return 0, errors.New("arrival times list is empty! ")
- } else if m%2 == 0 {
- med = (l[(m/2)-1] + l[(m/2)+1]) / 2
- } else {
- med = l[m/2]
- }
- return med, nil
-}
-
-// slotOffset returns the number of slots between slot
-func slotOffset(start, end uint64) (uint64, error) {
- os := end - start
- if end < start {
- return 0, errors.New("cannot have negative Slot Offset")
- }
- return os, nil
-}
diff --git a/lib/babe/median_test.go b/lib/babe/median_test.go
deleted file mode 100644
index 2a1c51f021..0000000000
--- a/lib/babe/median_test.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2019 ChainSafe Systems (ON) Corp.
-// This file is part of gossamer.
-//
-// The gossamer library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The gossamer library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
- // along with the gossamer library. If not, see <http://www.gnu.org/licenses/>.
-
-package babe
-
-import (
- "math/big"
- "testing"
- "time"
-
- "github.com/ChainSafe/gossamer/dot/state"
- "github.com/ChainSafe/gossamer/dot/types"
- "github.com/stretchr/testify/require"
-)
-
-func TestMedian_OddLength(t *testing.T) {
- us := []uint64{3, 2, 1, 4, 5}
- res, err := median(us)
- require.NoError(t, err)
-
- var expected uint64 = 3
- require.Equal(t, expected, res)
-}
-
-func TestMedian_EvenLength(t *testing.T) {
- us := []uint64{1, 4, 2, 4, 5, 6}
- res, err := median(us)
- require.NoError(t, err)
-
- var expected uint64 = 4
- require.Equal(t, expected, res)
-}
-
-func TestSlotOffset_Failing(t *testing.T) {
- var st uint64 = 1000001
- var se uint64 = 1000000
-
- _, err := slotOffset(st, se)
- require.NotNil(t, err)
-}
-
-func TestSlotOffset(t *testing.T) {
- var st uint64 = 1000000
- var se uint64 = 1000001
-
- res, err := slotOffset(st, se)
- require.NoError(t, err)
-
- var expected uint64 = 1
- require.Equal(t, expected, res)
-}
-
-func addBlocksToState(t *testing.T, babeService *Service, depth int, blockState BlockState, startTime time.Time) {
- previousHash := blockState.BestBlockHash()
- previousAT := startTime
- duration, err := time.ParseDuration("1s")
- builder, _ := NewBlockBuilder(
- babeService.keypair,
- babeService.transactionState,
- babeService.blockState,
- babeService.slotToProof,
- babeService.epochData.authorityIndex,
- )
- require.NoError(t, err)
-
- for i := 1; i <= depth; i++ {
- // create proof that we can authorize this block
- babeService.epochData.threshold = maxThreshold
- babeService.epochData.authorityIndex = 0
- slotNumber := uint64(i)
-
- outAndProof, err := babeService.runLottery(slotNumber, testEpochIndex)
- require.NoError(t, err)
- require.NotNil(t, outAndProof, "proof was nil when over threshold")
-
- babeService.slotToProof[slotNumber] = outAndProof
-
- // create pre-digest
- slot := Slot{
- start: time.Now(),
- duration: duration,
- number: slotNumber,
- }
-
- predigest, err := builder.buildBlockPreDigest(slot)
- require.NoError(t, err)
-
- digest := types.NewDigest()
- err = digest.Add(*predigest)
- require.NoError(t, err)
- block := &types.Block{
- Header: types.Header{
- ParentHash: previousHash,
- Number: big.NewInt(int64(i)),
- Digest: digest,
- },
- Body: types.Body{},
- }
-
- arrivalTime := previousAT.Add(duration)
- previousHash = block.Header.Hash()
- previousAT = arrivalTime
-
- err = blockState.(*state.BlockState).AddBlockWithArrivalTime(block, arrivalTime)
- require.NoError(t, err)
- }
-}
-
-func TestSlotTime(t *testing.T) {
- babeService := createTestService(t, nil)
- addBlocksToState(t, babeService, 100, babeService.blockState, time.Now())
-
- res, err := babeService.slotTime(103, 20)
- require.NoError(t, err)
-
- dur, err := time.ParseDuration("127s")
- require.NoError(t, err)
-
- expected := time.Now().Add(dur)
- if int64(res) > expected.Unix()+3 && int64(res) < expected.Unix()-3 {
- t.Fatalf("Fail: got %d expected %d", res, expected.Unix())
- }
-}
-
-func TestEstimateCurrentSlot(t *testing.T) {
- babeService := createTestService(t, nil)
- // create proof that we can authorize this block
- babeService.epochData.threshold = maxThreshold
- babeService.epochData.authorityIndex = 0
-
- builder, _ := NewBlockBuilder(
- babeService.keypair,
- babeService.transactionState,
- babeService.blockState,
- babeService.slotToProof,
- babeService.epochData.authorityIndex,
- )
-
- slotNumber := uint64(17)
-
- outAndProof, err := babeService.runLottery(slotNumber, testEpochIndex)
- require.NoError(t, err)
- require.NotNil(t, outAndProof, "proof was nil when over threshold")
-
- babeService.slotToProof[slotNumber] = outAndProof
-
- // create pre-digest
- slot := Slot{
- start: time.Now(),
- duration: babeService.slotDuration,
- number: slotNumber,
- }
-
- predigest, err := builder.buildBlockPreDigest(slot)
- require.NoError(t, err)
-
- digest := types.NewDigest()
- err = digest.Add(predigest)
- require.NoError(t, err)
- block := &types.Block{
- Header: types.Header{
- ParentHash: genesisHeader.Hash(),
- Number: big.NewInt(int64(1)),
- Digest: digest,
- },
- Body: types.Body{},
- }
-
- arrivalTime := time.Now().UnixNano() - slot.duration.Nanoseconds()
-
- err = babeService.blockState.(*state.BlockState).AddBlockWithArrivalTime(block, time.Unix(0, arrivalTime))
- require.NoError(t, err)
-
- estimatedSlot, err := babeService.estimateCurrentSlot()
- require.NoError(t, err)
- if estimatedSlot > slotNumber+2 && estimatedSlot < slotNumber-2 {
- t.Fatalf("Fail: got %d expected %d", estimatedSlot, slotNumber)
- }
-}
-
-func TestGetCurrentSlot(t *testing.T) {
- babeService := createTestService(t, nil)
-
- before, err := time.ParseDuration("300s")
- require.NoError(t, err)
- beforeSecs := time.Now().Unix() - int64(before.Seconds())
-
- addBlocksToState(t, babeService, 100, babeService.blockState, time.Unix(beforeSecs, 0))
-
- res, err := babeService.getCurrentSlot()
- require.NoError(t, err)
-
- expected := uint64(167)
-
- if res > expected+2 && res < expected-2 {
- t.Fatalf("Fail: got %d expected %d", res, expected)
- }
-}
diff --git a/lib/babe/verify_test.go b/lib/babe/verify_test.go
index ce66f35871..6bd6ddd525 100644
--- a/lib/babe/verify_test.go
+++ b/lib/babe/verify_test.go
@@ -18,7 +18,6 @@ package babe
import (
"errors"
- "fmt"
"io/ioutil"
"os"
"testing"
@@ -49,7 +48,7 @@ func newTestVerificationManager(t *testing.T, genCfg *types.BabeConfiguration) *
genCfg = genesisBABEConfig
}
- gen, genTrie, genHeader := genesis.NewTestGenesisWithTrieAndHeader(t)
+ gen, genTrie, genHeader := genesis.NewDevGenesisWithTrieAndHeader(t)
err = dbSrv.Initialise(gen, genHeader, genTrie)
require.NoError(t, err)
@@ -71,10 +70,7 @@ func newTestVerificationManager(t *testing.T, genCfg *types.BabeConfiguration) *
func TestVerificationManager_OnDisabled_InvalidIndex(t *testing.T) {
vm := newTestVerificationManager(t, nil)
- babeService := createTestService(t, &ServiceConfig{
- ThresholdNumerator: 1,
- ThresholdDenominator: 1,
- })
+ babeService := createTestService(t, nil)
block, _ := createTestBlock(t, babeService, genesisHeader, [][]byte{}, 1, testEpochIndex)
err := vm.SetOnDisabled(1, &block.Header)
require.Equal(t, err, ErrInvalidBlockProducerIndex)
@@ -85,15 +81,11 @@ func TestVerificationManager_OnDisabled_NewDigest(t *testing.T) {
require.NoError(t, err)
cfg := &ServiceConfig{
- Keypair: kp,
- ThresholdNumerator: 1,
- ThresholdDenominator: 1,
+ Keypair: kp,
}
babeService := createTestService(t, cfg)
- fmt.Println("Finished creating test service")
-
vm := newTestVerificationManager(t, nil)
vm.epochInfo[testEpochIndex] = &verifierInfo{
authorities: babeService.epochData.authorities,
@@ -101,7 +93,8 @@ func TestVerificationManager_OnDisabled_NewDigest(t *testing.T) {
randomness: babeService.epochData.randomness,
}
- block, _ := createTestBlock(t, babeService, genesisHeader, [][]byte{}, 1, testEpochIndex)
+ parent, _ := babeService.blockState.BestBlockHeader()
+ block, _ := createTestBlock(t, babeService, parent, [][]byte{}, 1, testEpochIndex)
err = vm.blockState.AddBlock(block)
require.NoError(t, err)
@@ -109,7 +102,7 @@ func TestVerificationManager_OnDisabled_NewDigest(t *testing.T) {
require.NoError(t, err)
// create an OnDisabled change on a different branch
- block, _ = createTestBlock(t, babeService, genesisHeader, [][]byte{}, 2, testEpochIndex)
+ block, _ = createTestBlock(t, babeService, parent, [][]byte{}, 2, testEpochIndex)
err = vm.blockState.AddBlock(block)
require.NoError(t, err)
@@ -122,9 +115,7 @@ func TestVerificationManager_OnDisabled_DuplicateDigest(t *testing.T) {
require.NoError(t, err)
cfg := &ServiceConfig{
- Keypair: kp,
- ThresholdNumerator: 1,
- ThresholdDenominator: 1,
+ Keypair: kp,
}
babeService := createTestService(t, cfg)
@@ -152,64 +143,8 @@ func TestVerificationManager_OnDisabled_DuplicateDigest(t *testing.T) {
require.Equal(t, ErrAuthorityAlreadyDisabled, err)
}
-func TestVerificationManager_VerifyBlock_IsDisabled(t *testing.T) {
- t.Skip() // TODO: fix OnDisabled digests and re-enable this
-
- babeService := createTestService(t, &ServiceConfig{
- ThresholdNumerator: 1,
- ThresholdDenominator: 1,
- })
-
- rt, err := babeService.blockState.GetRuntime(nil)
- require.NoError(t, err)
-
- cfg, err := rt.BabeConfiguration()
- require.NoError(t, err)
-
- cfg.GenesisAuthorities = types.AuthoritiesToRaw(babeService.epochData.authorities)
- cfg.C1 = 1
- cfg.C2 = 1
-
- vm := newTestVerificationManager(t, cfg)
- block, _ := createTestBlock(t, babeService, genesisHeader, [][]byte{}, 1, testEpochIndex)
-
- err = vm.blockState.AddBlock(block)
- require.NoError(t, err)
-
- err = vm.SetOnDisabled(0, &block.Header)
- require.NoError(t, err)
-
- // a block that we created, that disables ourselves, should still be accepted
- err = vm.VerifyBlock(&block.Header)
- require.NoError(t, err)
-
- block, _ = createTestBlock(t, babeService, &block.Header, [][]byte{}, 2, testEpochIndex)
- err = vm.blockState.AddBlock(block)
- require.NoError(t, err)
-
- // any blocks following the one where we are disabled should reject
- err = vm.VerifyBlock(&block.Header)
- require.Equal(t, ErrAuthorityDisabled, err)
-
- // let's try a block on a different chain, it shouldn't reject
- parentHeader := genesisHeader
- for slot := 77; slot < 80; slot++ {
- block, _ = createTestBlock(t, babeService, parentHeader, [][]byte{}, uint64(slot), testEpochIndex)
- err = vm.blockState.AddBlock(block)
- require.NoError(t, err)
- parentHeader = &block.Header
- }
-
- err = vm.VerifyBlock(&block.Header)
- require.NoError(t, err)
-}
-
func TestVerificationManager_VerifyBlock_Ok(t *testing.T) {
- babeService := createTestService(t, &ServiceConfig{
- ThresholdNumerator: 1,
- ThresholdDenominator: 1,
- })
-
+ babeService := createTestService(t, nil)
rt, err := babeService.blockState.GetRuntime(nil)
require.NoError(t, err)
@@ -229,11 +164,7 @@ func TestVerificationManager_VerifyBlock_Ok(t *testing.T) {
}
func TestVerificationManager_VerifyBlock_MultipleEpochs(t *testing.T) {
- babeService := createTestService(t, &ServiceConfig{
- ThresholdNumerator: 1,
- ThresholdDenominator: 1,
- })
-
+ babeService := createTestService(t, nil)
rt, err := babeService.blockState.GetRuntime(nil)
require.NoError(t, err)
@@ -269,12 +200,7 @@ func TestVerificationManager_VerifyBlock_MultipleEpochs(t *testing.T) {
}
func TestVerificationManager_VerifyBlock_InvalidBlockOverThreshold(t *testing.T) {
- t.Skip() // TODO
- babeService := createTestService(t, &ServiceConfig{
- ThresholdNumerator: 1,
- ThresholdDenominator: 1,
- })
-
+ babeService := createTestService(t, nil)
rt, err := babeService.blockState.GetRuntime(nil)
require.NoError(t, err)
@@ -294,11 +220,7 @@ func TestVerificationManager_VerifyBlock_InvalidBlockOverThreshold(t *testing.T)
}
func TestVerificationManager_VerifyBlock_InvalidBlockAuthority(t *testing.T) {
- babeService := createTestService(t, &ServiceConfig{
- ThresholdNumerator: 1,
- ThresholdDenominator: 1,
- })
-
+ babeService := createTestService(t, nil)
rt, err := babeService.blockState.GetRuntime(nil)
require.NoError(t, err)
diff --git a/lib/blocktree/blocktree.go b/lib/blocktree/blocktree.go
index c7fc51527f..9ba0aafd80 100644
--- a/lib/blocktree/blocktree.go
+++ b/lib/blocktree/blocktree.go
@@ -34,7 +34,7 @@ type Hash = common.Hash
// BlockTree represents the current state with all possible blocks
type BlockTree struct {
- head *node // root node TODO: rename this!!
+ root *node
leaves *leafMap
db database.Database
sync.RWMutex
@@ -45,7 +45,7 @@ type BlockTree struct {
// NewEmptyBlockTree creates a BlockTree with a nil head
func NewEmptyBlockTree(db database.Database) *BlockTree {
return &BlockTree{
- head: nil,
+ root: nil,
leaves: newEmptyLeafMap(),
db: db,
nodeCache: make(map[Hash]*node),
@@ -56,17 +56,17 @@ func NewEmptyBlockTree(db database.Database) *BlockTree {
// NewBlockTreeFromRoot initialises a blocktree with a root block. The root block is always the most recently
// finalised block (ie the genesis block if the node is just starting.)
func NewBlockTreeFromRoot(root *types.Header, db database.Database) *BlockTree {
- head := &node{
+ n := &node{
hash: root.Hash(),
parent: nil,
children: []*node{},
depth: big.NewInt(0),
- arrivalTime: uint64(time.Now().Unix()), // TODO: genesis block doesn't need an arrival time, it isn't used in median algo
+ arrivalTime: uint64(time.Now().Unix()),
}
return &BlockTree{
- head: head,
- leaves: newLeafMap(head),
+ root: n,
+ leaves: newLeafMap(n),
db: db,
nodeCache: make(map[Hash]*node),
runtime: &sync.Map{},
@@ -77,7 +77,7 @@ func NewBlockTreeFromRoot(root *types.Header, db database.Database) *BlockTree {
func (bt *BlockTree) GenesisHash() Hash {
bt.RLock()
defer bt.RUnlock()
- return bt.head.hash
+ return bt.root.hash
}
// AddBlock inserts the block as child of its parent node
@@ -148,12 +148,12 @@ func (bt *BlockTree) GetAllBlocksAtDepth(hash common.Hash) []common.Hash {
depth := big.NewInt(0).Add(bt.getNode(hash).depth, big.NewInt(1))
- if bt.head.depth.Cmp(depth) == 0 {
- hashes = append(hashes, bt.head.hash)
+ if bt.root.depth.Cmp(depth) == 0 {
+ hashes = append(hashes, bt.root.hash)
return hashes
}
- return bt.head.getNodesWithDepth(depth, hashes)
+ return bt.root.getNodesWithDepth(depth, hashes)
}
func (bt *BlockTree) setInCache(b *node) {
@@ -174,8 +174,8 @@ func (bt *BlockTree) getNode(h Hash) (ret *node) {
return b
}
- if bt.head.hash == h {
- return bt.head
+ if bt.root.hash == h {
+ return bt.root
}
for _, leaf := range bt.leaves.nodes() {
@@ -184,7 +184,7 @@ func (bt *BlockTree) getNode(h Hash) (ret *node) {
}
}
- for _, child := range bt.head.children {
+ for _, child := range bt.root.children {
if n := child.getNode(h); n != nil {
return n
}
@@ -205,7 +205,7 @@ func (bt *BlockTree) Prune(finalised Hash) (pruned []Hash) {
}
}()
- if finalised == bt.head.hash {
+ if finalised == bt.root.hash {
return pruned
}
@@ -214,8 +214,8 @@ func (bt *BlockTree) Prune(finalised Hash) (pruned []Hash) {
return pruned
}
- pruned = bt.head.prune(n, nil)
- bt.head = n
+ pruned = bt.root.prune(n, nil)
+ bt.root = n
leaves := n.getLeaves(nil)
bt.leaves = newEmptyLeafMap()
for _, leaf := range leaves {
@@ -231,9 +231,9 @@ func (bt *BlockTree) String() string {
defer bt.RUnlock()
// Construct tree
- tree := gotree.New(bt.head.string())
+ tree := gotree.New(bt.root.string())
- for _, child := range bt.head.children {
+ for _, child := range bt.root.children {
sub := tree.Add(child.string())
child.createTree(sub)
}
@@ -371,7 +371,7 @@ func (bt *BlockTree) GetAllBlocks() []Hash {
bt.RLock()
defer bt.RUnlock()
- return bt.head.getAllDescendants(nil)
+ return bt.root.getAllDescendants(nil)
}
// DeepCopy returns a copy of the BlockTree
@@ -384,11 +384,11 @@ func (bt *BlockTree) DeepCopy() *BlockTree {
nodeCache: make(map[Hash]*node),
}
- if bt.head == nil {
+ if bt.root == nil {
return btCopy
}
- btCopy.head = bt.head.deepCopy(nil)
+ btCopy.root = bt.root.deepCopy(nil)
if bt.leaves != nil {
btCopy.leaves = newEmptyLeafMap()
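Since this hunk renames head to root throughout, a minimal sketch of the semantics being renamed, using only the exported API visible in this diff (package paths are assumed from the repo layout; a nil database is accepted, as in the package's own tests): the root is the most recently finalised block, and Prune moves it forward.

package main

import (
	"fmt"
	"math/big"

	"github.com/ChainSafe/gossamer/dot/types"
	"github.com/ChainSafe/gossamer/lib/blocktree"
)

func main() {
	// Header standing in for the most recently finalised block (genesis on a fresh node).
	header := &types.Header{
		Number: big.NewInt(0),
		Digest: types.NewDigest(),
	}

	bt := blocktree.NewBlockTreeFromRoot(header, nil)
	fmt.Println(bt.GenesisHash()) // hash of the current root

	// Pruning to the root itself is a no-op and returns no pruned hashes.
	fmt.Println(len(bt.Prune(bt.GenesisHash()))) // 0
}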
diff --git a/lib/blocktree/blocktree_test.go b/lib/blocktree/blocktree_test.go
index 692e5834d1..4dae7c4300 100644
--- a/lib/blocktree/blocktree_test.go
+++ b/lib/blocktree/blocktree_test.go
@@ -35,10 +35,10 @@ var testHeader = &types.Header{
Digest: types.NewDigest(),
}
-func newBlockTreeFromNode(head *node, db database.Database) *BlockTree {
+func newBlockTreeFromNode(root *node, db database.Database) *BlockTree {
return &BlockTree{
- head: head,
- leaves: newLeafMap(head),
+ root: root,
+ leaves: newLeafMap(root),
db: db,
}
}
@@ -47,9 +47,9 @@ func createFlatTree(t *testing.T, depth int) (*BlockTree, []common.Hash) {
bt := NewBlockTreeFromRoot(testHeader, nil)
require.NotNil(t, bt)
- previousHash := bt.head.hash
+ previousHash := bt.root.hash
- hashes := []common.Hash{bt.head.hash}
+ hashes := []common.Hash{bt.root.hash}
for i := 1; i <= depth; i++ {
header := &types.Header{
ParentHash: previousHash,
@@ -131,12 +131,12 @@ func TestNode_isDecendantOf(t *testing.T) {
// Check leaf is descendant of root
leaf := bt.getNode(hashes[3])
- if !leaf.isDescendantOf(bt.head) {
+ if !leaf.isDescendantOf(bt.root) {
t.Error("failed to verify leaf is descendant of root")
}
// Verify the inverse relationship does not hold
- if bt.head.isDescendantOf(leaf) {
+ if bt.root.isDescendantOf(leaf) {
t.Error("root should not be descendant of anything")
}
}
@@ -257,7 +257,7 @@ func TestBlockTree_GetNodeCache(t *testing.T) {
func TestBlockTree_GetAllBlocksAtDepth(t *testing.T) {
bt, _ := createTestBlockTree(testHeader, 8, nil)
- hashes := bt.head.getNodesWithDepth(big.NewInt(10), []common.Hash{})
+ hashes := bt.root.getNodesWithDepth(big.NewInt(10), []common.Hash{})
expected := []common.Hash{}
@@ -322,7 +322,7 @@ func TestBlockTree_GetAllBlocksAtDepth(t *testing.T) {
}
}
- hashes = bt.head.getNodesWithDepth(big.NewInt(int64(desiredDepth)), []common.Hash{})
+ hashes = bt.root.getNodesWithDepth(big.NewInt(int64(desiredDepth)), []common.Hash{})
if !reflect.DeepEqual(hashes, expected) {
t.Fatalf("Fail: did not get all expected hashes got %v expected %v", hashes, expected)
@@ -333,11 +333,11 @@ func TestBlockTree_IsDecendantOf(t *testing.T) {
// Create tree with depth 4 (with 4 nodes)
bt, hashes := createFlatTree(t, 4)
- isDescendant, err := bt.IsDescendantOf(bt.head.hash, hashes[3])
+ isDescendant, err := bt.IsDescendantOf(bt.root.hash, hashes[3])
require.NoError(t, err)
require.True(t, isDescendant)
- isDescendant, err = bt.IsDescendantOf(hashes[3], bt.head.hash)
+ isDescendant, err = bt.IsDescendantOf(hashes[3], bt.root.hash)
require.NoError(t, err)
require.False(t, isDescendant)
}
@@ -403,7 +403,7 @@ func TestBlockTree_Prune(t *testing.T) {
copy := bt.DeepCopy()
// pick some block to finalise
- finalised := bt.head.children[0].children[0].children[0]
+ finalised := bt.root.children[0].children[0].children[0]
pruned := bt.Prune(finalised.hash)
for _, prunedHash := range pruned {
@@ -436,7 +436,7 @@ func TestBlockTree_PruneCache(t *testing.T) {
}
// pick some block to finalise
- finalised := bt.head.children[0].children[0].children[0]
+ finalised := bt.root.children[0].children[0].children[0]
pruned := bt.Prune(finalised.hash)
for _, prunedHash := range pruned {
@@ -464,11 +464,11 @@ func TestBlockTree_DeepCopy(t *testing.T) {
require.True(t, equalNodeValue(b, b2))
}
- require.True(t, equalNodeValue(bt.head, btCopy.head), "BlockTree heads not equal")
+ require.True(t, equalNodeValue(bt.root, btCopy.root), "BlockTree heads not equal")
require.True(t, equalLeave(bt.leaves, btCopy.leaves), "BlockTree leaves not equal")
- btCopy.head = &node{}
- require.NotEqual(t, bt.head, btCopy.head)
+ btCopy.root = &node{}
+ require.NotEqual(t, bt.root, btCopy.root)
}
func equalNodeValue(nd *node, ndCopy *node) bool {
diff --git a/lib/blocktree/database.go b/lib/blocktree/database.go
index 77c1974b72..2a3f8e0279 100644
--- a/lib/blocktree/database.go
+++ b/lib/blocktree/database.go
@@ -40,7 +40,7 @@ func (bt *BlockTree) Load() error {
// Encode recursively encodes the block tree
// enc(node) = [32B block hash + 8B arrival time + 8B num children n] | enc(children[0]) | ... | enc(children[n-1])
func (bt *BlockTree) Encode() ([]byte, error) {
- return encodeRecursive(bt.head, []byte{})
+ return encodeRecursive(bt.root, []byte{})
}
// encode recursively encodes the blocktree by depth-first traversal
@@ -90,7 +90,7 @@ func (bt *BlockTree) Decode(in []byte) error {
return err
}
- bt.head = &node{
+ bt.root = &node{
hash: hash,
parent: nil,
children: make([]*node, numChildren),
@@ -98,9 +98,9 @@ func (bt *BlockTree) Decode(in []byte) error {
arrivalTime: arrivalTime,
}
- bt.leaves = newLeafMap(bt.head)
+ bt.leaves = newLeafMap(bt.root)
- return bt.decodeRecursive(r, bt.head)
+ return bt.decodeRecursive(r, bt.root)
}
// decode recursively decodes the blocktree
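The Encode comment above pins the per-node layout to a 32-byte hash, an 8-byte arrival time, and an 8-byte child count, followed by each child's encoding. A rough sketch of that layout for a single node (the byte order here is an assumption; encodeRecursive in this file is authoritative):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeNode writes [32B hash | 8B arrival time | 8B num children] for one node;
// children would then be appended recursively, as the Encode comment describes.
func encodeNode(hash [32]byte, arrivalTime, numChildren uint64) []byte {
	buf := new(bytes.Buffer)
	buf.Write(hash[:])
	binary.Write(buf, binary.LittleEndian, arrivalTime) // byte order assumed
	binary.Write(buf, binary.LittleEndian, numChildren) // byte order assumed
	return buf.Bytes()
}

func main() {
	var hash [32]byte
	enc := encodeNode(hash, 1634300000, 2)
	fmt.Println(len(enc)) // 48 bytes before any child encodings
}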
diff --git a/lib/blocktree/database_test.go b/lib/blocktree/database_test.go
index 7072a68813..1237263053 100644
--- a/lib/blocktree/database_test.go
+++ b/lib/blocktree/database_test.go
@@ -88,7 +88,7 @@ func TestStoreBlockTree(t *testing.T) {
err = resBt.Load()
require.NoError(t, err)
- if !reflect.DeepEqual(bt.head, resBt.head) {
+ if !reflect.DeepEqual(bt.root, resBt.root) {
t.Fatalf("Fail: got %v expected %v", resBt, bt)
}
diff --git a/lib/blocktree/node_test.go b/lib/blocktree/node_test.go
index b2c37c3d3f..96572addae 100644
--- a/lib/blocktree/node_test.go
+++ b/lib/blocktree/node_test.go
@@ -60,8 +60,8 @@ func TestNode_Prune(t *testing.T) {
copy := bt.DeepCopy()
// pick some block to finalise
- finalised := bt.head.children[0].children[0].children[0]
- pruned := bt.head.prune(finalised, nil)
+ finalised := bt.root.children[0].children[0].children[0]
+ pruned := bt.root.prune(finalised, nil)
for _, prunedHash := range pruned {
prunedNode := copy.getNode(prunedHash)
diff --git a/lib/common/well_known_keys.go b/lib/common/well_known_keys.go
index 75346cec28..1bf4f706c5 100644
--- a/lib/common/well_known_keys.go
+++ b/lib/common/well_known_keys.go
@@ -8,29 +8,3 @@ var (
// it's set to empty or false (0x00) otherwise
UpgradedToDualRefKey = MustHexToBytes("0x26aa394eea5630e07c48ae0c9558cef7c21aab032aaa6e946ca50ad39ab66603")
)
-
-// BalanceKey returns the storage trie key for the balance of the account with the given public key
-// TODO: deprecate
-func BalanceKey(key [32]byte) ([]byte, error) {
- accKey := append([]byte("balance:"), key[:]...)
-
- hash, err := Blake2bHash(accKey)
- if err != nil {
- return nil, err
- }
-
- return hash[:], nil
-}
-
-// NonceKey returns the storage trie key for the nonce of the account with the given public key
-// TODO: deprecate
-func NonceKey(key [32]byte) ([]byte, error) {
- accKey := append([]byte("nonce:"), key[:]...)
-
- hash, err := Blake2bHash(accKey)
- if err != nil {
- return nil, err
- }
-
- return hash[:], nil
-}
diff --git a/lib/genesis/helpers.go b/lib/genesis/helpers.go
index 8a681efa2f..c9a785cdfe 100644
--- a/lib/genesis/helpers.go
+++ b/lib/genesis/helpers.go
@@ -300,7 +300,7 @@ func buildRawArrayInterface(a []interface{}, kv *keyValue) error {
return err
}
case string:
- // todo check to confirm it's an address
+ // TODO: check to confirm it's an address (#1865)
tba := crypto.PublicAddressToByteArray(common.Address(v2))
kv.value = kv.value + fmt.Sprintf("%x", tba)
kv.iVal = append(kv.iVal, tba)
@@ -379,7 +379,7 @@ func generateStorageValue(i interface{}, idx int) ([]byte, error) {
return nil, err
}
case [][]interface{}:
- // TODO: for members field in phragmenElection struct figure out the correct format for encoding value
+ // TODO: figure out the correct format for encoding the members field in the phragmenElection struct (#1866)
for _, data := range t {
for _, v := range data {
var accAddr accountAddr
@@ -403,7 +403,7 @@ func generateStorageValue(i interface{}, idx int) ([]byte, error) {
return nil, err
}
default:
- return nil, fmt.Errorf("errror")
+ return nil, fmt.Errorf("invalid value type")
}
return encode, nil
}
@@ -413,6 +413,7 @@ func generateContractKeyValue(c *contracts, prefixKey string, res map[string]str
key string
err error
)
+
// First field of contract is the storage key
val := reflect.ValueOf(c)
if k := reflect.Indirect(val).Type().Field(0).Name; k == currentSchedule {
@@ -426,6 +427,7 @@ func generateContractKeyValue(c *contracts, prefixKey string, res map[string]str
if err != nil {
return err
}
+
res[key] = common.BytesToHex(encode)
return nil
}
@@ -433,20 +435,24 @@ func generateContractKeyValue(c *contracts, prefixKey string, res map[string]str
func generateKeyValue(s interface{}, prefixKey string, res map[string]string) error {
val := reflect.ValueOf(s)
n := reflect.Indirect(val).NumField()
+
for i := 0; i < n; i++ {
val := reflect.ValueOf(s)
storageKey := reflect.Indirect(val).Type().Field(i).Name
- if storageKey == phantom { //TODO: figure out what to do with Phantom as its value is null
+ if storageKey == phantom { // ignore Phantom as its value is null
continue
}
+
key, err := generateStorageKey(prefixKey, storageKey)
if err != nil {
return err
}
+
value, err := generateStorageValue(s, i)
if err != nil {
return err
}
+
res[key] = common.BytesToHex(value)
}
return nil
diff --git a/lib/genesis/pallet.go b/lib/genesis/pallet.go
index d429ac4846..fad5661d8e 100644
--- a/lib/genesis/pallet.go
+++ b/lib/genesis/pallet.go
@@ -124,7 +124,7 @@ type contracts struct {
type society struct {
Pot *scale.Uint128 `json:"Pot"`
MaxMembers uint32 `json:"MaxMembers"`
- // TODO: figure out the correct encoding format of members field
+ // TODO: figure out the correct encoding format of members field (#1867)
Members []string `json:"Members"`
}
@@ -136,7 +136,7 @@ type staking struct {
ForceEra string `json:"ForceEra"`
SlashRewardFraction uint32 `json:"SlashRewardFraction"`
CanceledSlashPayout *scale.Uint128 `json:"CanceledSlashPayout"`
- // TODO: figure out below fields storage key.
+ // TODO: figure out the storage key for the fields below (#1868)
// Stakers [][]interface{} `json:"Stakers"`
}
@@ -160,6 +160,6 @@ type instance1Membership struct {
}
type phragmenElection struct {
- // TODO: figure out the correct encoding format of members data
+ // TODO: figure out the correct encoding format of members data (#1866)
Members [][]interface{} `json:"Members"`
}
diff --git a/lib/genesis/test_utils.go b/lib/genesis/test_utils.go
index 3b7a1f0a5b..d7f4b0e90f 100644
--- a/lib/genesis/test_utils.go
+++ b/lib/genesis/test_utils.go
@@ -125,10 +125,28 @@ func NewTestGenesisWithTrieAndHeader(t *testing.T) (*Genesis, *trie.Trie, *types
require.NoError(t, err)
}
+ tr, h := newGenesisTrieAndHeader(t, gen)
+ return gen, tr, h
+}
+
+// NewDevGenesisWithTrieAndHeader generates test dev genesis, genesis trie and genesis header
+func NewDevGenesisWithTrieAndHeader(t *testing.T) (*Genesis, *trie.Trie, *types.Header) {
+ gen, err := NewGenesisFromJSONRaw("../../chain/dev/genesis.json")
+ if err != nil {
+ gen, err = NewGenesisFromJSONRaw("../../../chain/dev/genesis.json")
+ require.NoError(t, err)
+ }
+
+ tr, h := newGenesisTrieAndHeader(t, gen)
+ return gen, tr, h
+}
+
+func newGenesisTrieAndHeader(t *testing.T, gen *Genesis) (*trie.Trie, *types.Header) {
genTrie, err := NewTrieFromGenesis(gen)
require.NoError(t, err)
genesisHeader, err := types.NewHeader(common.NewHash([]byte{0}), genTrie.MustHash(), trie.EmptyHash, big.NewInt(0), types.NewDigest())
require.NoError(t, err)
- return gen, genTrie, genesisHeader
+
+ return genTrie, genesisHeader
}
diff --git a/lib/grandpa/grandpa.go b/lib/grandpa/grandpa.go
index 3900355359..d4152b9665 100644
--- a/lib/grandpa/grandpa.go
+++ b/lib/grandpa/grandpa.go
@@ -41,7 +41,8 @@ const (
)
var (
- interval = time.Second // TODO: make this configurable; currently 1s is same as substrate; total round length is interval * 2
+ // TODO: make this configurable; currently 1s is the same as substrate; total round length is interval * 2 (#1869)
+ interval = time.Second
logger = log.New("pkg", "grandpa")
)
@@ -53,7 +54,7 @@ type Service struct {
blockState BlockState
grandpaState GrandpaState
digestHandler DigestHandler
- keypair *ed25519.Keypair // TODO: change to grandpa keystore
+ keypair *ed25519.Keypair // TODO: change to grandpa keystore (#1870)
mapLock sync.Mutex
chanLock sync.Mutex
roundLock sync.Mutex
@@ -175,10 +176,8 @@ func NewService(cfg *Config) (*Service, error) {
// Start begins the GRANDPA finality service
func (s *Service) Start() error {
- // TODO: determine if we need to send a catch-up request
-
- err := s.registerProtocol()
- if err != nil {
+ // TODO: determine if we need to send a catch-up request (#1531)
+ if err := s.registerProtocol(); err != nil {
return err
}
@@ -189,8 +188,7 @@ func (s *Service) Start() error {
}
go func() {
- err := s.initiate()
- if err != nil {
+ if err := s.initiate(); err != nil {
logger.Crit("failed to initiate", "error", err)
}
}()
@@ -838,7 +836,7 @@ func (s *Service) createJustification(bfc common.Hash, stage Subround) ([]Signed
spc = s.precommits
}
- // TODO: use equivacatory votes to create justification as well
+ // TODO: use equivocatory votes to create justification as well (#1667)
spc.Range(func(_, value interface{}) bool {
pc := value.(*SignedVote)
var isDescendant bool
@@ -884,7 +882,7 @@ func (s *Service) getBestFinalCandidate() (*Vote, error) {
// if there are no blocks with >=2/3 pre-commits, just return the pre-voted block
// TODO: is this correct? the spec implies that it should return nil, but discussions have suggested
- // that we return the prevoted block.
+ // that we return the prevoted block. (#1815)
if len(blocks) == 0 {
return &prevoted, nil
}
@@ -989,7 +987,7 @@ func (s *Service) getPreVotedBlock() (Vote, error) {
return Vote{}, err
}
- // TODO: if there are no blocks with >=2/3 voters, then just pick the highest voted block
+ // if there are no blocks with >=2/3 voters, then just pick the highest voted block
if len(blocks) == 0 {
return s.getGrandpaGHOST()
}
diff --git a/lib/grandpa/grandpa_test.go b/lib/grandpa/grandpa_test.go
index f667dfcd96..509f98e9bf 100644
--- a/lib/grandpa/grandpa_test.go
+++ b/lib/grandpa/grandpa_test.go
@@ -71,7 +71,7 @@ func newTestState(t *testing.T) *state.Service {
t.Cleanup(func() { db.Close() })
- gen, genTrie, _ := genesis.NewTestGenesisWithTrieAndHeader(t)
+ _, genTrie, _ := genesis.NewTestGenesisWithTrieAndHeader(t)
block, err := state.NewBlockStateFromGenesis(db, testGenesisHeader)
require.NoError(t, err)
@@ -80,7 +80,7 @@ func newTestState(t *testing.T) *state.Service {
rtCfg.Storage, err = rtstorage.NewTrieState(genTrie)
require.NoError(t, err)
- rt, err := wasmer.NewRuntimeFromGenesis(gen, rtCfg)
+ rt, err := wasmer.NewRuntimeFromGenesis(rtCfg)
require.NoError(t, err)
block.StoreRuntime(block.BestBlockHash(), rt)
@@ -1052,7 +1052,7 @@ func TestGetBestFinalCandidate_PrecommitOnAnotherChain(t *testing.T) {
func TestDeterminePreVote_NoPrimaryPreVote(t *testing.T) {
gs, st := newTestService(t)
- state.AddBlocksToState(t, st.Block, 3)
+ state.AddBlocksToState(t, st.Block, 3, false)
pv, err := gs.determinePreVote()
require.NoError(t, err)
@@ -1064,10 +1064,10 @@ func TestDeterminePreVote_NoPrimaryPreVote(t *testing.T) {
func TestDeterminePreVote_WithPrimaryPreVote(t *testing.T) {
gs, st := newTestService(t)
- state.AddBlocksToState(t, st.Block, 3)
+ state.AddBlocksToState(t, st.Block, 3, false)
header, err := st.Block.BestBlockHeader()
require.NoError(t, err)
- state.AddBlocksToState(t, st.Block, 1)
+ state.AddBlocksToState(t, st.Block, 1, false)
derivePrimary := gs.derivePrimary()
primary := derivePrimary.PublicKeyBytes()
@@ -1085,7 +1085,7 @@ func TestDeterminePreVote_WithPrimaryPreVote(t *testing.T) {
func TestDeterminePreVote_WithInvalidPrimaryPreVote(t *testing.T) {
gs, st := newTestService(t)
- state.AddBlocksToState(t, st.Block, 3)
+ state.AddBlocksToState(t, st.Block, 3, false)
header, err := st.Block.BestBlockHeader()
require.NoError(t, err)
@@ -1095,7 +1095,7 @@ func TestDeterminePreVote_WithInvalidPrimaryPreVote(t *testing.T) {
Vote: *NewVoteFromHeader(header),
})
- state.AddBlocksToState(t, st.Block, 5)
+ state.AddBlocksToState(t, st.Block, 5, false)
gs.head, err = st.Block.BestBlockHeader()
require.NoError(t, err)
@@ -1287,7 +1287,7 @@ func TestGrandpa_NonAuthority(t *testing.T) {
time.Sleep(time.Millisecond * 100)
- state.AddBlocksToState(t, st.Block, 8)
+ state.AddBlocksToState(t, st.Block, 8, false)
head := st.Block.BestBlockHash()
err = st.Block.SetFinalisedHash(head, gs.state.round, gs.state.setID)
require.NoError(t, err)
diff --git a/lib/grandpa/message_handler.go b/lib/grandpa/message_handler.go
index fffdbd77bf..4e08992556 100644
--- a/lib/grandpa/message_handler.go
+++ b/lib/grandpa/message_handler.go
@@ -87,7 +87,7 @@ func (h *MessageHandler) handleNeighbourMessage(msg *NeighbourMessage) error {
}
// TODO; determine if there is some reason we don't receive justifications in responses near the head (usually),
- // and remove the following code if it's fixed.
+ // and remove the following code if it's fixed. (#1815)
head, err := h.blockState.BestBlockNumber()
if err != nil {
return err
@@ -99,7 +99,7 @@ func (h *MessageHandler) handleNeighbourMessage(msg *NeighbourMessage) error {
}
logger.Debug("got neighbour message", "number", msg.Number, "set id", msg.SetID, "round", msg.Round)
- // TODO: should we send a justification request here? potentially re-connect this to sync package?
+ // TODO: should we send a justification request here? potentially re-connect this to sync package? (#1815)
return nil
}
@@ -130,7 +130,8 @@ func (h *MessageHandler) handleCommitMessage(msg *CommitMessage) error {
if err = h.grandpa.grandpaState.SetPrecommits(msg.Round, msg.SetID, pcs); err != nil {
return err
}
- // TODO: re-add catch-up logic
+
+ // TODO: re-add catch-up logic (#1531)
return nil
}
@@ -165,7 +166,7 @@ func (h *MessageHandler) handleCatchUpResponse(msg *CatchUpResponse) error {
logger.Debug("received catch up response", "round", msg.Round, "setID", msg.SetID, "hash", msg.Hash)
- // TODO: re-add catch-up logic
+ // TODO: re-add catch-up logic (#1531)
if true {
return nil
}
diff --git a/lib/grandpa/message_tracker_test.go b/lib/grandpa/message_tracker_test.go
index 091e560e90..76519b6c6f 100644
--- a/lib/grandpa/message_tracker_test.go
+++ b/lib/grandpa/message_tracker_test.go
@@ -34,7 +34,7 @@ func TestMessageTracker_ValidateMessage(t *testing.T) {
require.NoError(t, err)
gs, _, _, _ := setupGrandpa(t, kr.Bob().(*ed25519.Keypair))
- state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 3)
+ state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 3, false)
gs.tracker = newTracker(gs.blockState, gs.messageHandler)
fake := &types.Header{
@@ -60,7 +60,7 @@ func TestMessageTracker_SendMessage(t *testing.T) {
require.NoError(t, err)
gs, in, _, _ := setupGrandpa(t, kr.Bob().(*ed25519.Keypair))
- state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 3)
+ state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 3, false)
gs.tracker = newTracker(gs.blockState, gs.messageHandler)
gs.tracker.start()
defer gs.tracker.stop()
@@ -105,7 +105,7 @@ func TestMessageTracker_ProcessMessage(t *testing.T) {
require.NoError(t, err)
gs, _, _, _ := setupGrandpa(t, kr.Bob().(*ed25519.Keypair))
- state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 3)
+ state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 3, false)
err = gs.Start()
require.NoError(t, err)
@@ -153,7 +153,7 @@ func TestMessageTracker_MapInsideMap(t *testing.T) {
require.NoError(t, err)
gs, _, _, _ := setupGrandpa(t, kr.Bob().(*ed25519.Keypair))
- state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 3)
+ state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 3, false)
gs.tracker = newTracker(gs.blockState, gs.messageHandler)
header := &types.Header{
diff --git a/lib/grandpa/round_test.go b/lib/grandpa/round_test.go
index c95e672897..0bab46deda 100644
--- a/lib/grandpa/round_test.go
+++ b/lib/grandpa/round_test.go
@@ -146,7 +146,7 @@ func TestGrandpa_BaseCase(t *testing.T) {
for i, gs := range gss {
gs, _, _, _ = setupGrandpa(t, kr.Keys[i])
gss[i] = gs
- state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 15)
+ state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 15, false)
pv, err := gs.determinePreVote() //nolint
require.NoError(t, err)
prevotes.Store(gs.publicKeyBytes(), &SignedVote{
@@ -197,7 +197,7 @@ func TestGrandpa_DifferentChains(t *testing.T) {
gss[i] = gs
r := rand.Intn(3)
- state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 4+r)
+ state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 4+r, false)
pv, err := gs.determinePreVote() //nolint
require.NoError(t, err)
prevotes.Store(gs.publicKeyBytes(), &SignedVote{
@@ -234,7 +234,8 @@ func TestGrandpa_DifferentChains(t *testing.T) {
finalised := gss[0].head
for i, gs := range gss {
- // TODO: this can be changed to equal once attemptToFinalizeRound is implemented (needs check for >=2/3 precommits)
+ // TODO: this can be changed to equal once attemptToFinalizeRound is implemented
+ // (needs check for >=2/3 precommits) (#1026)
headOk := onSameChain(gss[0].blockState, finalised.Hash(), gs.head.Hash())
finalisedOK := onSameChain(gs.blockState, finalised.Hash(), gs.head.Hash())
require.True(t, headOk || finalisedOK, "node %d did not match: %s", i, gs.blockState.BlocktreeAsString())
@@ -282,7 +283,7 @@ func TestPlayGrandpaRound_BaseCase(t *testing.T) {
outs[i] = out
fins[i] = fin
- state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 4)
+ state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 4, false)
}
for _, out := range outs {
@@ -367,7 +368,7 @@ func TestPlayGrandpaRound_VaryingChain(t *testing.T) {
r := 0
r = rand.Intn(diff)
- chain, _ := state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 4+r)
+ chain, _ := state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 4+r, false)
if r == diff-1 {
headers = chain
}
@@ -565,7 +566,7 @@ func TestPlayGrandpaRound_MultipleRounds(t *testing.T) {
outs[i] = out
fins[i] = fin
- state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 4)
+ state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 4, false)
}
for _, out := range outs {
@@ -627,7 +628,7 @@ func TestPlayGrandpaRound_MultipleRounds(t *testing.T) {
}
for _, gs := range gss {
- state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 1)
+ state.AddBlocksToState(t, gs.blockState.(*state.BlockState), 1, false)
}
}
diff --git a/lib/grandpa/state.go b/lib/grandpa/state.go
index 5f256e259e..cbec44d859 100644
--- a/lib/grandpa/state.go
+++ b/lib/grandpa/state.go
@@ -67,7 +67,7 @@ type GrandpaState interface { //nolint
}
// DigestHandler is the interface required by GRANDPA for the digest handler
-type DigestHandler interface { // TODO: remove, use GrandpaState
+type DigestHandler interface { // TODO: use GrandpaState instead (#1871)
NextGrandpaAuthorityChange() uint64
}
diff --git a/lib/grandpa/types.go b/lib/grandpa/types.go
index f3b219c303..63b95a8705 100644
--- a/lib/grandpa/types.go
+++ b/lib/grandpa/types.go
@@ -94,7 +94,7 @@ func (s *State) pubkeyToVoter(pk *ed25519.PublicKey) (*Voter, error) {
}
// threshold returns the 2/3 |voters| threshold value
-// TODO: determine rounding, is currently set to floor
+// TODO: determine rounding, is currently set to floor (#1815)
func (s *State) threshold() uint64 {
return uint64(2 * len(s.voters) / 3)
}
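For context on the rounding TODO above: with integer division the formula floors, so for example 7 voters yield a threshold of 4. A minimal sketch that just evaluates the same formula for a few committee sizes:

package main

import "fmt"

// threshold mirrors the formula above: 2 * |voters| / 3, floored by integer division.
func threshold(voters int) uint64 {
	return uint64(2 * voters / 3)
}

func main() {
	for _, n := range []int{4, 6, 7, 9, 10} {
		fmt.Printf("voters=%d threshold=%d\n", n, threshold(n))
	}
	// voters=4 threshold=2
	// voters=6 threshold=4
	// voters=7 threshold=4
	// voters=9 threshold=6
	// voters=10 threshold=6
}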
diff --git a/lib/grandpa/vote_message.go b/lib/grandpa/vote_message.go
index 4127b3ab25..4e80f87290 100644
--- a/lib/grandpa/vote_message.go
+++ b/lib/grandpa/vote_message.go
@@ -170,7 +170,7 @@ func (s *Service) validateMessage(from peer.ID, m *VoteMessage) (*Vote, error) {
}
}
- // TODO: get justification if your round is lower, or just do catch-up?
+ // TODO: get justification if your round is lower, or just do catch-up? (#1815)
return nil, errRoundMismatch(m.Round, s.state.round)
}
diff --git a/lib/grandpa/vote_message_test.go b/lib/grandpa/vote_message_test.go
index 1bb46f2b4b..69cb2f5b16 100644
--- a/lib/grandpa/vote_message_test.go
+++ b/lib/grandpa/vote_message_test.go
@@ -45,7 +45,7 @@ func TestCheckForEquivocation_NoEquivocation(t *testing.T) {
gs, err := NewService(cfg)
require.NoError(t, err)
- state.AddBlocksToState(t, st.Block, 3)
+ state.AddBlocksToState(t, st.Block, 3, false)
h, err := st.Block.BestBlockHeader()
require.NoError(t, err)
@@ -126,27 +126,21 @@ func TestCheckForEquivocation_WithExistingEquivocation(t *testing.T) {
gs, err := NewService(cfg)
require.NoError(t, err)
- var branches []*types.Header
- for {
- _, branches = state.AddBlocksToState(t, st.Block, 8)
- if len(branches) > 1 {
- break
- }
- }
-
- h, err := st.Block.BestBlockHeader()
- require.NoError(t, err)
+ branches := make(map[int]int)
+ branches[6] = 1
+ state.AddBlocksToStateWithFixedBranches(t, st.Block, 8, branches, 0)
+ leaves := gs.blockState.Leaves()
- vote := NewVoteFromHeader(h)
+ vote1, err := NewVoteFromHash(leaves[1], gs.blockState)
require.NoError(t, err)
voter := voters[0]
gs.prevotes.Store(voter.Key.AsBytes(), &SignedVote{
- Vote: *vote,
+ Vote: *vote1,
})
- vote2 := NewVoteFromHeader(branches[0])
+ vote2, err := NewVoteFromHash(leaves[0], gs.blockState)
require.NoError(t, err)
equivocated := gs.checkForEquivocation(&voter, &SignedVote{
@@ -157,8 +151,7 @@ func TestCheckForEquivocation_WithExistingEquivocation(t *testing.T) {
require.Equal(t, 0, gs.lenVotes(prevote))
require.Equal(t, 1, len(gs.pvEquivocations))
- vote3 := NewVoteFromHeader(branches[1])
- require.NoError(t, err)
+ vote3 := vote1
equivocated = gs.checkForEquivocation(&voter, &SignedVote{
Vote: *vote3,
@@ -188,7 +181,7 @@ func TestValidateMessage_Valid(t *testing.T) {
gs, err := NewService(cfg)
require.NoError(t, err)
- state.AddBlocksToState(t, st.Block, 3)
+ state.AddBlocksToState(t, st.Block, 3, false)
h, err := st.Block.BestBlockHeader()
require.NoError(t, err)
@@ -221,7 +214,7 @@ func TestValidateMessage_InvalidSignature(t *testing.T) {
gs, err := NewService(cfg)
require.NoError(t, err)
- state.AddBlocksToState(t, st.Block, 3)
+ state.AddBlocksToState(t, st.Block, 3, false)
h, err := st.Block.BestBlockHeader()
require.NoError(t, err)
@@ -254,7 +247,7 @@ func TestValidateMessage_SetIDMismatch(t *testing.T) {
gs, err := NewService(cfg)
require.NoError(t, err)
- state.AddBlocksToState(t, st.Block, 3)
+ state.AddBlocksToState(t, st.Block, 3, false)
h, err := st.Block.BestBlockHeader()
require.NoError(t, err)
@@ -289,15 +282,11 @@ func TestValidateMessage_Equivocation(t *testing.T) {
gs, err := NewService(cfg)
require.NoError(t, err)
- var branches []*types.Header
- for {
- _, branches = state.AddBlocksToState(t, st.Block, 8)
- if len(branches) != 0 {
- break
- }
- }
-
+ branches := make(map[int]int)
+ branches[6] = 1
+ state.AddBlocksToStateWithFixedBranches(t, st.Block, 8, branches, 0)
leaves := gs.blockState.Leaves()
+
voteA, err := NewVoteFromHash(leaves[0], st.Block)
require.NoError(t, err)
voteB, err := NewVoteFromHash(leaves[1], st.Block)
@@ -336,7 +325,7 @@ func TestValidateMessage_BlockDoesNotExist(t *testing.T) {
gs, err := NewService(cfg)
require.NoError(t, err)
- state.AddBlocksToState(t, st.Block, 3)
+ state.AddBlocksToState(t, st.Block, 3, false)
gs.tracker = newTracker(st.Block, gs.messageHandler)
fake := &types.Header{
@@ -372,20 +361,19 @@ func TestValidateMessage_IsNotDescendant(t *testing.T) {
require.NoError(t, err)
gs.tracker = newTracker(gs.blockState, gs.messageHandler)
- var branches []*types.Header
- for {
- _, branches = state.AddBlocksToState(t, st.Block, 8)
- if len(branches) != 0 {
- break
- }
- }
+ branches := make(map[int]int)
+ branches[6] = 1
+ state.AddBlocksToStateWithFixedBranches(t, st.Block, 8, branches, 0)
+ leaves := gs.blockState.Leaves()
- h, err := st.Block.BestBlockHeader()
+ gs.head, err = gs.blockState.GetHeader(leaves[0])
require.NoError(t, err)
- gs.head = h
gs.keypair = kr.Alice().(*ed25519.Keypair)
- _, msg, err := gs.createSignedVoteAndVoteMessage(NewVoteFromHeader(branches[0]), prevote)
+ vote, err := NewVoteFromHash(leaves[1], gs.blockState)
+ require.NoError(t, err)
+
+ _, msg, err := gs.createSignedVoteAndVoteMessage(vote, prevote)
require.NoError(t, err)
gs.keypair = kr.Bob().(*ed25519.Keypair)
diff --git a/lib/keystore/helpers.go b/lib/keystore/helpers.go
index 576be2b930..8bd816fd46 100644
--- a/lib/keystore/helpers.go
+++ b/lib/keystore/helpers.go
@@ -309,7 +309,6 @@ func UnlockKeys(ks Keystore, dir, unlock, password string) error {
// DetermineKeyType takes string as defined in https://github.com/w3f/PSPs/blob/psp-rpc-api/psp-002.md#Key-types
// and returns the crypto.KeyType
func DetermineKeyType(t string) crypto.KeyType {
- // TODO: create separate keystores for different key types, issue #768
switch t {
case "babe":
return crypto.Sr25519Type
@@ -339,7 +338,6 @@ func HasKey(pubKeyStr, keyType string, keystore Keystore) (bool, error) {
cKeyType := DetermineKeyType(keyType)
var pubKey crypto.PublicKey
- // TODO: consider handling for different key types, see issue #768
switch cKeyType {
case crypto.Sr25519Type:
pubKey, err = sr25519.NewPublicKey(keyBytes)
diff --git a/lib/keystore/keystore.go b/lib/keystore/keystore.go
index 62c2fd932d..b8607df7d1 100644
--- a/lib/keystore/keystore.go
+++ b/lib/keystore/keystore.go
@@ -65,7 +65,7 @@ func NewGlobalKeystore() *GlobalKeystore {
return &GlobalKeystore{
Babe: NewBasicKeystore(BabeName, crypto.Sr25519Type),
Gran: NewBasicKeystore(GranName, crypto.Ed25519Type),
- Acco: NewGenericKeystore(AccoName), // TODO: which type is used? can an account be either type?
+ Acco: NewGenericKeystore(AccoName), // TODO: which type is used? can an account be either type? (#1872)
Aura: NewBasicKeystore(AuraName, crypto.Sr25519Type),
Imon: NewBasicKeystore(ImonName, crypto.Sr25519Type),
Audi: NewBasicKeystore(AudiName, crypto.Sr25519Type),
diff --git a/dot/types/extrinsic_test.go b/lib/runtime/common.go
similarity index 68%
rename from dot/types/extrinsic_test.go
rename to lib/runtime/common.go
index 4c8dd6869d..dedd3ef0cf 100644
--- a/dot/types/extrinsic_test.go
+++ b/lib/runtime/common.go
@@ -14,6 +14,14 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the gossamer library. If not, see .
-package types
+package runtime
-// TODO: improve dot tests #687
+// Int64ToPointerAndSize converts an int64 into an int32 pointer and an int32 length
+func Int64ToPointerAndSize(in int64) (ptr, length int32) {
+ return int32(in), int32(in >> 32)
+}
+
+// PointerAndSizeToInt64 converts an int32 pointer and size to an int64
+func PointerAndSizeToInt64(ptr, size int32) int64 {
+ return int64(ptr) | (int64(size) << 32)
+}
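A minimal round-trip sketch of the two helpers above (logic copied from this hunk; the example values are arbitrary), showing how a pointer and a length are packed into one 64-bit wasm span descriptor:

package main

import "fmt"

func int64ToPointerAndSize(in int64) (ptr, length int32) {
	return int32(in), int32(in >> 32)
}

func pointerAndSizeToInt64(ptr, size int32) int64 {
	return int64(ptr) | (int64(size) << 32)
}

func main() {
	span := pointerAndSizeToInt64(4096, 32)  // ptr=0x1000, size=0x20
	fmt.Printf("span=0x%016x\n", span)       // span=0x0000002000001000
	ptr, size := int64ToPointerAndSize(span) // unpack
	fmt.Println(ptr, size)                   // 4096 32
}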
diff --git a/lib/runtime/interface.go b/lib/runtime/interface.go
index f0f2b24067..fe13812275 100644
--- a/lib/runtime/interface.go
+++ b/lib/runtime/interface.go
@@ -51,8 +51,9 @@ type Instance interface {
DecodeSessionKeys(enc []byte) ([]byte, error)
PaymentQueryInfo(ext []byte) (*types.TransactionPaymentQueryInfo, error)
- // TODO: parameters and return values for these are undefined in the spec
- CheckInherents()
+ CheckInherents() // TODO: use this in block verification process (#1873)
+
+ // parameters and return values for these are undefined in the spec
RandomSeed()
OffchainWorker()
GenerateSessionKeys()
@@ -78,6 +79,7 @@ type Storage interface {
BeginStorageTransaction()
CommitStorageTransaction()
RollbackStorageTransaction()
+ LoadCode() []byte
}
// BasicNetwork interface for functions used by runtime network state function
diff --git a/lib/runtime/life/exports.go b/lib/runtime/life/exports.go
index 6b398c3b8d..b6ef05a667 100644
--- a/lib/runtime/life/exports.go
+++ b/lib/runtime/life/exports.go
@@ -40,7 +40,7 @@ func (in *Instance) Version() (runtime.Version, error) {
// error comes from scale now, so do a string check
if err != nil {
if strings.Contains(err.Error(), "EOF") {
- // TODO: kusama seems to use the legacy version format
+ // kusama seems to use the legacy version format
lversion := &runtime.LegacyVersionData{}
err = lversion.Decode(res)
return lversion, err
@@ -133,21 +133,24 @@ func (in *Instance) ExecuteBlock(block *types.Block) ([]byte, error) {
if err != nil {
return nil, err
}
+
b.Header.Digest = types.NewDigest()
- // TODO: hack since substrate node_runtime can't seem to handle BABE pre-runtime digests
- // with type prefix (ie Primary, Secondary...)
- if bytes.Equal(in.version.SpecName(), []byte("kusama")) {
- // remove seal digest only
- for _, d := range block.Header.Digest.Types {
- switch d.Value().(type) {
- case types.SealDigest:
- continue
- default:
- err = b.Header.Digest.Add(d.Value())
- if err != nil {
- return nil, err
- }
+ // remove seal digest only
+ for _, d := range block.Header.Digest.Types {
+ // hack since substrate node_runtime can't seem to handle BABE pre-runtime digests
+ // with type prefix (ie Primary, Secondary...)
+ if bytes.Equal(in.version.SpecName(), []byte("node")) {
+ break
+ }
+
+ switch d.Value().(type) {
+ case types.SealDigest:
+ continue
+ default:
+ err = b.Header.Digest.Add(d.Value())
+ if err != nil {
+ return nil, err
}
}
}
diff --git a/lib/runtime/life/exports_test.go b/lib/runtime/life/exports_test.go
index 71581f0943..c583146056 100644
--- a/lib/runtime/life/exports_test.go
+++ b/lib/runtime/life/exports_test.go
@@ -31,7 +31,7 @@ func newInstanceFromGenesis(t *testing.T) runtime.Instance {
cfg.Storage = genState
cfg.LogLvl = 4
- instance, err := NewRuntimeFromGenesis(gen, cfg)
+ instance, err := NewRuntimeFromGenesis(cfg)
require.NoError(t, err)
return instance
}
@@ -230,7 +230,7 @@ func TestInstance_ExecuteBlock_KusamaRuntime_KusamaBlock1(t *testing.T) {
cfg.Storage = genState
cfg.LogLvl = 4
- instance, err := NewRuntimeFromGenesis(gen, cfg)
+ instance, err := NewRuntimeFromGenesis(cfg)
require.NoError(t, err)
// block data is received from querying a polkadot node
@@ -280,7 +280,7 @@ func TestInstance_ExecuteBlock_PolkadotRuntime_PolkadotBlock1(t *testing.T) {
cfg.Storage = genState
cfg.LogLvl = 5
- instance, err := NewRuntimeFromGenesis(gen, cfg)
+ instance, err := NewRuntimeFromGenesis(cfg)
require.NoError(t, err)
// block data is received from querying a polkadot node
@@ -308,5 +308,6 @@ func TestInstance_ExecuteBlock_PolkadotRuntime_PolkadotBlock1(t *testing.T) {
Body: *types.NewBody(types.BytesArrayToExtrinsics(exts)),
}
- _, _ = instance.ExecuteBlock(block) // TODO: fix
+ _, err = instance.ExecuteBlock(block)
+ require.NoError(t, err)
}
diff --git a/lib/runtime/life/instance.go b/lib/runtime/life/instance.go
index 72a244abe1..abadbc7679 100644
--- a/lib/runtime/life/instance.go
+++ b/lib/runtime/life/instance.go
@@ -24,9 +24,9 @@ import (
"sync"
"github.com/ChainSafe/gossamer/lib/common"
- "github.com/ChainSafe/gossamer/lib/genesis"
"github.com/ChainSafe/gossamer/lib/keystore"
"github.com/ChainSafe/gossamer/lib/runtime"
+
log "github.com/ChainSafe/log15"
"github.com/perlin-network/life/exec"
wasm_validation "github.com/perlin-network/life/wasm-validation"
@@ -61,13 +61,16 @@ func (*Instance) GetCodeHash() common.Hash {
}
// NewRuntimeFromGenesis creates a runtime instance from the genesis data
-func NewRuntimeFromGenesis(g *genesis.Genesis, cfg *Config) (runtime.Instance, error) { // TODO: simplify, get :code from storage
- codeStr := g.GenesisFields().Raw["top"][common.BytesToHex(common.CodeKey)]
- if codeStr == "" {
- return nil, fmt.Errorf("cannot find :code in genesis")
+func NewRuntimeFromGenesis(cfg *Config) (runtime.Instance, error) {
+ if cfg.Storage == nil {
+ return nil, errors.New("storage is nil")
+ }
+
+ code := cfg.Storage.LoadCode()
+ if len(code) == 0 {
+ return nil, fmt.Errorf("cannot find :code in state")
}
- code := common.MustHexToBytes(codeStr)
cfg.Resolver = new(Resolver)
return NewInstance(code, cfg)
}
@@ -113,7 +116,7 @@ func NewInstance(code []byte, cfg *Config) (*Instance, error) {
memory: instance.Memory,
}
- // TODO: use __heap_base
+ // TODO: use __heap_base (#1874)
allocator := runtime.NewAllocator(memory, 0)
runtimeCtx := &runtime.Context{
@@ -202,7 +205,7 @@ func (in *Instance) Exec(function string, data []byte) ([]byte, error) {
return nil, err
}
- offset, length := int64ToPointerAndSize(ret)
+ offset, length := runtime.Int64ToPointerAndSize(ret)
return in.vm.Memory[offset : offset+length], nil
}
@@ -228,15 +231,3 @@ func (*Instance) Validator() bool {
func (*Instance) Keystore() *keystore.GlobalKeystore {
return ctx.Keystore
}
-
-// TODO: move below to lib/runtime
-
-// int64ToPointerAndSize converts an int64 into a int32 pointer and a int32 length
-func int64ToPointerAndSize(in int64) (ptr, length int32) {
- return int32(in), int32(in >> 32)
-}
-
-// pointerAndSizeToInt64 converts int32 pointer and size to a int64
-func pointerAndSizeToInt64(ptr, size int32) int64 {
- return int64(ptr) | (int64(size) << 32)
-}
diff --git a/lib/runtime/life/resolver.go b/lib/runtime/life/resolver.go
index 2bf0e70948..1ab0ed3d14 100644
--- a/lib/runtime/life/resolver.go
+++ b/lib/runtime/life/resolver.go
@@ -22,7 +22,7 @@ import (
)
// Resolver resolves the imports for life
-type Resolver struct{} // TODO: move context inside resolver
+type Resolver struct{} // TODO: move context inside resolver (#1875)
// ResolveFunc ...
func (*Resolver) ResolveFunc(module, field string) exec.FunctionImport { // nolint
@@ -300,7 +300,7 @@ func ext_storage_set_version_1(vm *exec.VirtualMachine) int64 {
key := asMemorySlice(vm.Memory, keySpan)
value := asMemorySlice(vm.Memory, valueSpan)
- logger.Debug("[ext_storage_set_version_1]", "key", fmt.Sprintf("0x%x", key), "val", fmt.Sprintf("0x%x", value))
+ logger.Info("[ext_storage_set_version_1]", "key", fmt.Sprintf("0x%x", key), "val", fmt.Sprintf("0x%x", value))
cp := make([]byte, len(value))
copy(cp, value)
@@ -399,7 +399,7 @@ func ext_storage_read_version_1(vm *exec.VirtualMachine) int64 {
size = uint32(0)
} else {
size = uint32(len(value[offset:]))
- valueBuf, valueLen := int64ToPointerAndSize(valueOut)
+ valueBuf, valueLen := runtime.Int64ToPointerAndSize(valueOut)
copy(memory[valueBuf:valueBuf+valueLen], value[offset:])
}
@@ -586,7 +586,7 @@ func ext_default_child_storage_set_version_1(vm *exec.VirtualMachine) int64 {
logger.Error("[ext_default_child_storage_set_version_1] failed to set value in child storage", "error", err)
return 0
}
- // todo(ed) what is this supposed to return?
+
return 0
}
@@ -629,7 +629,7 @@ func ext_default_child_storage_read_version_1(vm *exec.VirtualMachine) int64 {
return 0
}
- valueBuf, valueLen := int64ToPointerAndSize(valueOut)
+ valueBuf, valueLen := runtime.Int64ToPointerAndSize(valueOut)
copy(memory[valueBuf:valueBuf+valueLen], value[offset:])
size := uint32(len(value[offset:]))
@@ -1122,7 +1122,7 @@ func ext_crypto_sr25519_verify_version_1(vm *exec.VirtualMachine) int64 {
if ok, err := pub.VerifyDeprecated(message, signature); err != nil || !ok {
logger.Debug("[ext_crypto_sr25519_verify_version_1] failed to validate signature", "error", err)
- // TODO: fix this, fails at block 3876
+ // this fails at block 3876; however, based on discussions, this seems to be expected
return 1
}
@@ -1278,26 +1278,20 @@ func ext_trie_blake2_256_root_version_1(vm *exec.VirtualMachine) int64 {
data := asMemorySlice(memory, dataSpan)
t := trie.NewEmptyTrie()
- // TODO: this is a fix for the length until slices of structs can be decoded
- // length passed in is the # of (key, value) tuples, but we are decoding as a slice of []byte
- data[0] = data[0] << 1
// this function is expecting an array of (key, value) tuples
- var kvs [][]byte
- err := scale.Unmarshal(data, &kvs)
- if err != nil {
- logger.Error("[ext_trie_blake2_256_root_version_1]", "error", err)
- return 0
+ type kv struct {
+ Key, Value []byte
}
- keyValues := kvs
- if len(keyValues)%2 != 0 { // TODO: this can be removed when we have decoding of slices of structs
- logger.Warn("[ext_trie_blake2_256_root_version_1] odd number of input key-values, skipping last value")
- keyValues = keyValues[:len(keyValues)-1]
+ var kvs []kv
+ if err := scale.Unmarshal(data, &kvs); err != nil {
+ logger.Error("[ext_trie_blake2_256_root_version_1]", "error", err)
+ return 0
}
- for i := 0; i < len(keyValues); i = i + 2 {
- t.Put(keyValues[i], keyValues[i+1])
+ for _, kv := range kvs {
+ t.Put(kv.Key, kv.Value)
}
// allocate memory for value and copy value to memory
@@ -1320,7 +1314,7 @@ func ext_trie_blake2_256_root_version_1(vm *exec.VirtualMachine) int64 {
// Convert 64bit wasm span descriptor to Go memory slice
func asMemorySlice(memory []byte, span int64) []byte {
- ptr, size := int64ToPointerAndSize(span)
+ ptr, size := runtime.Int64ToPointerAndSize(span)
return memory[ptr : ptr+size]
}
@@ -1368,7 +1362,7 @@ func toWasmMemory(memory, data []byte) (int64, error) {
}
copy(memory[out:out+size], data)
- return pointerAndSizeToInt64(int32(out), int32(size)), nil
+ return runtime.PointerAndSizeToInt64(int32(out), int32(size)), nil
}
// Wraps slice in optional and copies result to wasm memory. Returns resulting 64bit span descriptor
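The rewritten ext_trie_blake2_256_root_version_1 above leans on the scale codec decoding slices of structs directly, instead of the old length hack. A small round-trip sketch of that pattern (the pkg/scale import path is an assumption based on the repo layout):

package main

import (
	"fmt"

	"github.com/ChainSafe/gossamer/pkg/scale"
)

type kv struct {
	Key, Value []byte
}

func main() {
	in := []kv{
		{Key: []byte("noot"), Value: []byte("washere")},
		{Key: []byte("key2"), Value: []byte("val2")},
	}

	// Encode the (key, value) tuples, then decode them back as a slice of structs,
	// mirroring what the host function does with the data span it receives.
	enc, err := scale.Marshal(in)
	if err != nil {
		panic(err)
	}

	var out []kv
	if err := scale.Unmarshal(enc, &out); err != nil {
		panic(err)
	}

	fmt.Printf("%s=%s\n", out[0].Key, out[0].Value) // noot=washere
}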
diff --git a/lib/runtime/life/resolver_test.go b/lib/runtime/life/resolver_test.go
index 98babab58b..1cbb1fea8f 100644
--- a/lib/runtime/life/resolver_test.go
+++ b/lib/runtime/life/resolver_test.go
@@ -287,17 +287,16 @@ func Test_ext_trie_blake2_256_ordered_root_version_1(t *testing.T) {
func Test_ext_storage_root_version_1(t *testing.T) {
inst := NewTestInstance(t, runtime.HOST_API_TEST_RUNTIME)
- // TODO determine why this fails when commented below is un-commented
- //testkey := []byte("noot")
- //testvalue := []byte("washere")
- //
- //encKey, err := scale.Marshal(testkey)
- //require.NoError(t, err)
- //encValue, err := scale.Marshal(testvalue)
- //require.NoError(t, err)
- //
- //_, err = inst.Exec("rtm_ext_storage_set_version_1", append(encKey, encValue...))
- //require.NoError(t, err)
+ testkey := []byte("noot")
+ testvalue := []byte("washere")
+
+ encKey, err := scale.Marshal(testkey)
+ require.NoError(t, err)
+ encValue, err := scale.Marshal(testvalue)
+ require.NoError(t, err)
+
+ _, err = inst.Exec("rtm_ext_storage_set_version_1", append(encKey, encValue...))
+ require.NoError(t, err)
ret, err := inst.Exec("rtm_ext_storage_root_version_1", []byte{})
require.NoError(t, err)
@@ -306,7 +305,10 @@ func Test_ext_storage_root_version_1(t *testing.T) {
err = scale.Unmarshal(ret, &hash)
require.NoError(t, err)
- expected := trie.EmptyHash
+ tt := trie.NewEmptyTrie()
+ tt.Put([]byte("noot"), []byte("washere"))
+
+ expected := tt.MustHash()
require.Equal(t, expected[:], hash)
}
diff --git a/lib/runtime/storage/trie.go b/lib/runtime/storage/trie.go
index aa9a97cb2a..f0ba2b1ffc 100644
--- a/lib/runtime/storage/trie.go
+++ b/lib/runtime/storage/trie.go
@@ -267,37 +267,6 @@ func (s *TrieState) GetKeysWithPrefixFromChild(keyToChild, prefix []byte) ([][]b
return child.GetKeysWithPrefix(prefix), nil
}
-// TODO: remove functions below
-
-// SetBalance sets the balance for a given public key
-func (s *TrieState) SetBalance(key [32]byte, balance uint64) error {
- skey, err := common.BalanceKey(key)
- if err != nil {
- return err
- }
-
- bb := make([]byte, 8)
- binary.LittleEndian.PutUint64(bb, balance)
-
- s.Set(skey, bb)
- return nil
-}
-
-// GetBalance returns the balance for a given public key
-func (s *TrieState) GetBalance(key [32]byte) (uint64, error) {
- skey, err := common.BalanceKey(key)
- if err != nil {
- return 0, err
- }
-
- bal := s.Get(skey)
- if len(bal) != 8 {
- return 0, nil
- }
-
- return binary.LittleEndian.Uint64(bal), nil
-}
-
// LoadCode returns the runtime code (located at :code)
func (s *TrieState) LoadCode() []byte {
return s.Get(common.CodeKey)
diff --git a/lib/runtime/wasmer/exports.go b/lib/runtime/wasmer/exports.go
index 602ea0d38d..25925dce7b 100644
--- a/lib/runtime/wasmer/exports.go
+++ b/lib/runtime/wasmer/exports.go
@@ -174,7 +174,6 @@ func (in *Instance) ExecuteBlock(block *types.Block) ([]byte, error) {
return nil, err
}
}
-
}
bdEnc, err := b.Encode()
diff --git a/lib/runtime/wasmer/exports_test.go b/lib/runtime/wasmer/exports_test.go
index 3f83dc241b..01b2a0bb6e 100644
--- a/lib/runtime/wasmer/exports_test.go
+++ b/lib/runtime/wasmer/exports_test.go
@@ -177,7 +177,7 @@ func TestInstance_Version_KusamaRuntime(t *testing.T) {
cfg.Storage = genState
cfg.LogLvl = 4
- instance, err := NewRuntimeFromGenesis(gen, cfg)
+ instance, err := NewRuntimeFromGenesis(cfg)
require.NoError(t, err)
expected := runtime.NewVersionData(
@@ -190,7 +190,6 @@ func TestInstance_Version_KusamaRuntime(t *testing.T) {
0,
)
- // TODO: why does kusama seem to use the old runtime version format?
version, err := instance.(*Instance).Version()
require.NoError(t, err)
@@ -302,7 +301,7 @@ func TestNodeRuntime_ValidateTransaction(t *testing.T) {
nodeStorage.BaseDB = runtime.NewInMemoryDB(t)
cfg.NodeStorage = nodeStorage
- rt, err := NewRuntimeFromGenesis(gen, cfg)
+ rt, err := NewRuntimeFromGenesis(cfg)
require.NoError(t, err)
alicePub := common.MustHexToBytes("0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d")
@@ -589,7 +588,7 @@ func TestInstance_ExecuteBlock_NodeRuntime(t *testing.T) {
}
func TestInstance_ExecuteBlock_GossamerRuntime(t *testing.T) {
- t.Skip() // TODO: fix timestamping issue
+ t.Skip() // TODO: this fails with "syscall frame is no longer valid" (#1026)
gen, err := genesis.NewGenesisFromJSONRaw("../../../chain/gssmr/genesis.json")
require.NoError(t, err)
@@ -604,7 +603,7 @@ func TestInstance_ExecuteBlock_GossamerRuntime(t *testing.T) {
cfg.Storage = genState
cfg.LogLvl = 4
- instance, err := NewRuntimeFromGenesis(gen, cfg)
+ instance, err := NewRuntimeFromGenesis(cfg)
require.NoError(t, err)
block := buildBlockVdt(t, instance, common.Hash{})
@@ -618,7 +617,7 @@ func TestInstance_ExecuteBlock_GossamerRuntime(t *testing.T) {
}
func TestInstance_ApplyExtrinsic_GossamerRuntime(t *testing.T) {
- t.Skip() // fails with "'Bad input data provided to validate_transaction: Codec error"
+ t.Skip() // TODO: this fails with "syscall frame is no longer valid" (#1026)
gen, err := genesis.NewGenesisFromJSONRaw("../../../chain/gssmr/genesis.json")
require.NoError(t, err)
@@ -633,7 +632,7 @@ func TestInstance_ApplyExtrinsic_GossamerRuntime(t *testing.T) {
cfg.Storage = genState
cfg.LogLvl = 4
- instance, err := NewRuntimeFromGenesis(gen, cfg)
+ instance, err := NewRuntimeFromGenesis(cfg)
require.NoError(t, err)
// reset state back to parent state before executing
@@ -641,8 +640,7 @@ func TestInstance_ApplyExtrinsic_GossamerRuntime(t *testing.T) {
require.NoError(t, err)
instance.SetContextStorage(parentState)
- // TODO: where did this hash come from??
- parentHash := common.MustHexToHash("0x35a28a7dbaf0ba07d1485b0f3da7757e3880509edc8c31d0850cb6dd6219361d")
+ parentHash := common.Hash{}
header, err := types.NewHeader(parentHash, common.Hash{}, common.Hash{}, big.NewInt(1), types.NewDigest())
require.NoError(t, err)
err = instance.InitializeBlock(header)
@@ -691,7 +689,7 @@ func TestInstance_ExecuteBlock_PolkadotRuntime_PolkadotBlock1(t *testing.T) {
cfg.Storage = genState
cfg.LogLvl = 5
- instance, err := NewRuntimeFromGenesis(gen, cfg)
+ instance, err := NewRuntimeFromGenesis(cfg)
require.NoError(t, err)
// block data is received from querying a polkadot node
@@ -742,7 +740,7 @@ func TestInstance_ExecuteBlock_KusamaRuntime_KusamaBlock1(t *testing.T) {
cfg.Storage = genState
cfg.LogLvl = 4
- instance, err := NewRuntimeFromGenesis(gen, cfg)
+ instance, err := NewRuntimeFromGenesis(cfg)
require.NoError(t, err)
// block data is received from querying a polkadot node
diff --git a/lib/runtime/wasmer/imports.go b/lib/runtime/wasmer/imports.go
index 3fa88f3fe3..812c57439a 100644
--- a/lib/runtime/wasmer/imports.go
+++ b/lib/runtime/wasmer/imports.go
@@ -661,7 +661,7 @@ func ext_crypto_sr25519_verify_version_1(context unsafe.Pointer, sig C.int32_t,
if ok, err := pub.VerifyDeprecated(message, signature); err != nil || !ok {
logger.Debug("[ext_crypto_sr25519_verify_version_1] failed to validate signature", "error", err)
- // TODO: fix this, fails at block 3876
+ // this fails at block 3876, which seems to be expected based on discussions
return 1
}
@@ -715,7 +715,7 @@ func ext_crypto_sr25519_verify_version_2(context unsafe.Pointer, sig C.int32_t,
func ext_crypto_start_batch_verify_version_1(context unsafe.Pointer) {
logger.Debug("[ext_crypto_start_batch_verify_version_1] executing...")
- // TODO: fix and re-enable signature verification
+ // TODO: fix and re-enable signature verification (#1405)
// beginBatchVerify(context)
}
@@ -735,7 +735,7 @@ func beginBatchVerify(context unsafe.Pointer) { //nolint
func ext_crypto_finish_batch_verify_version_1(context unsafe.Pointer) C.int32_t {
logger.Debug("[ext_crypto_finish_batch_verify_version_1] executing...")
- // TODO: fix and re-enable signature verification
+ // TODO: fix and re-enable signature verification (#1405)
// return finishBatchVerify(context)
return 1
}
@@ -766,25 +766,20 @@ func ext_trie_blake2_256_root_version_1(context unsafe.Pointer, dataSpan C.int64
data := asMemorySlice(instanceContext, dataSpan)
t := trie.NewEmptyTrie()
- // TODO: this is a fix for the length until slices of structs can be decoded
- // length passed in is the # of (key, value) tuples, but we are decoding as a slice of []byte
- data[0] = data[0] << 1
+
+ type kv struct {
+ Key, Value []byte
+ }
// this function is expecting an array of (key, value) tuples
- var keyValues [][]byte
- err := scale.Unmarshal(data, &keyValues)
- if err != nil {
+ var kvs []kv
+ if err := scale.Unmarshal(data, &kvs); err != nil {
logger.Error("[ext_trie_blake2_256_root_version_1]", "error", err)
return 0
}
- if len(keyValues)%2 != 0 { // TODO: this can be removed when we have decoding of slices of structs
- logger.Warn("[ext_trie_blake2_256_root_version_1] odd number of input key-values, skipping last value")
- keyValues = keyValues[:len(keyValues)-1]
- }
-
- for i := 0; i < len(keyValues); i = i + 2 {
- t.Put(keyValues[i], keyValues[i+1])
+ for _, kv := range kvs {
+ t.Put(kv.Key, kv.Value)
}
// allocate memory for value and copy value to memory
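
Since the scale package now decodes slices of structs, the length-doubling workaround above is gone. A minimal standalone sketch of the round trip this hunk relies on, assuming only the pkg/scale and lib/trie calls that appear in the hunk itself (the sample key/value data is illustrative):

package main

import (
	"fmt"

	"github.com/ChainSafe/gossamer/lib/trie"
	"github.com/ChainSafe/gossamer/pkg/scale"
)

// kv mirrors the (key, value) tuple type decoded by the host function above.
type kv struct {
	Key, Value []byte
}

func main() {
	pairs := []kv{
		{Key: []byte("noot"), Value: []byte("was")},
		{Key: []byte("here"), Value: []byte("??")},
	}

	// encode the tuples as the runtime would pass them across the host boundary
	enc, err := scale.Marshal(pairs)
	if err != nil {
		panic(err)
	}

	// decode directly into a slice of structs (no length-doubling workaround)
	var decoded []kv
	if err := scale.Unmarshal(enc, &decoded); err != nil {
		panic(err)
	}

	// insert each pair and compute the root, as the host function does
	t := trie.NewEmptyTrie()
	for _, p := range decoded {
		t.Put(p.Key, p.Value)
	}

	root, err := t.Hash()
	if err != nil {
		panic(err)
	}
	fmt.Printf("trie root: %x\n", root)
}
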
@@ -825,7 +820,7 @@ func ext_trie_blake2_256_ordered_root_version_1(context unsafe.Pointer, dataSpan
for i, val := range values {
key, err := scale.Marshal(big.NewInt(int64(i))) //nolint
if err != nil {
- logger.Error("[ext_blake2_256_enumerated_trie_root]", "error", err)
+ logger.Error("[ext_trie_blake2_256_ordered_root_version_1]", "error", err)
return 0
}
logger.Trace("[ext_trie_blake2_256_ordered_root_version_1]", "key", key, "value", val)
@@ -941,7 +936,7 @@ func ext_default_child_storage_read_version_1(context unsafe.Pointer, childStora
return 0
}
- valueBuf, valueLen := int64ToPointerAndSize(int64(valueOut))
+ valueBuf, valueLen := runtime.Int64ToPointerAndSize(int64(valueOut))
copy(memory[valueBuf:valueBuf+valueLen], value[offset:])
size := uint32(len(value[offset:]))
@@ -1150,7 +1145,7 @@ func ext_default_child_storage_storage_kill_version_2(context unsafe.Pointer, ch
func ext_default_child_storage_storage_kill_version_3(context unsafe.Pointer, childStorageKeySpan, _ C.int64_t) C.int64_t {
logger.Debug("[ext_default_child_storage_storage_kill_version_3] executing...")
logger.Warn("[ext_default_child_storage_storage_kill_version_3] somewhat unimplemented")
- // TODO: need to use `limit` parameter
+ // TODO: need to use `limit` parameter (#1793)
instanceContext := wasm.IntoInstanceContext(context)
ctx := instanceContext.Data().(*runtime.Context)
@@ -1158,8 +1153,6 @@ func ext_default_child_storage_storage_kill_version_3(context unsafe.Pointer, ch
childStorageKey := asMemorySlice(instanceContext, childStorageKeySpan)
storage.DeleteChild(childStorageKey)
-
- // TODO: this function returns a `KillStorageResult` which may be `AllRemoved` (0) or `SomeRemaining` (1)
return 0
}
@@ -1677,7 +1670,7 @@ func ext_storage_clear_prefix_version_1(context unsafe.Pointer, prefixSpan C.int
func ext_storage_clear_prefix_version_2(context unsafe.Pointer, prefixSpan, _ C.int64_t) C.int64_t {
logger.Trace("[ext_storage_clear_prefix_version_2] executing...")
logger.Warn("[ext_storage_clear_prefix_version_2] somewhat unimplemented")
- // TODO: need to use unused `limit` parameter
+ // TODO: need to use unused `limit` parameter (#1792)
instanceContext := wasm.IntoInstanceContext(context)
ctx := instanceContext.Data().(*runtime.Context)
@@ -1778,7 +1771,7 @@ func ext_storage_read_version_1(context unsafe.Pointer, keySpan, valueOut C.int6
size = uint32(0)
} else {
size = uint32(len(value[offset:]))
- valueBuf, valueLen := int64ToPointerAndSize(int64(valueOut))
+ valueBuf, valueLen := runtime.Int64ToPointerAndSize(int64(valueOut))
copy(memory[valueBuf:valueBuf+valueLen], value[offset:])
}
@@ -1857,7 +1850,7 @@ func ext_storage_commit_transaction_version_1(context unsafe.Pointer) {
// Convert 64bit wasm span descriptor to Go memory slice
func asMemorySlice(context wasm.InstanceContext, span C.int64_t) []byte {
memory := context.Memory().Data()
- ptr, size := int64ToPointerAndSize(int64(span))
+ ptr, size := runtime.Int64ToPointerAndSize(int64(span))
return memory[ptr : ptr+size]
}
@@ -1878,7 +1871,7 @@ func toWasmMemory(context wasm.InstanceContext, data []byte) (int64, error) {
}
copy(memory[out:out+size], data)
- return pointerAndSizeToInt64(int32(out), int32(size)), nil
+ return runtime.PointerAndSizeToInt64(int32(out), int32(size)), nil
}
// Copy a byte slice of a fixed size to wasm memory and return resulting pointer
diff --git a/lib/runtime/wasmer/imports_test.go b/lib/runtime/wasmer/imports_test.go
index 46c3a0c1ff..d28af9bd05 100644
--- a/lib/runtime/wasmer/imports_test.go
+++ b/lib/runtime/wasmer/imports_test.go
@@ -352,7 +352,7 @@ func Test_ext_storage_read_version_1_again(t *testing.T) {
read, err := new(optional.Bytes).Decode(buf)
require.NoError(t, err)
val := read.Value()
- require.Equal(t, len(testvalue)-int(testoffset), len(val)) // TODO: fix
+ require.Equal(t, len(testvalue)-int(testoffset), len(val))
require.Equal(t, testvalue[testoffset:], val[:len(testvalue)-int(testoffset)])
}
@@ -418,7 +418,7 @@ func Test_ext_storage_set_version_1(t *testing.T) {
}
func Test_ext_offline_index_set_version_1(t *testing.T) {
- // TODO this currently fails with error could nat find exported function, determine how else to test this
+ // TODO: this currently fails with the error "could not find exported function"; add an rtm_ func to the tester wasm (#1026)
t.Skip()
inst := NewTestInstance(t, runtime.HOST_API_TEST_RUNTIME)
@@ -470,7 +470,7 @@ func Test_ext_crypto_ed25519_generate_version_1(t *testing.T) {
require.NoError(t, err)
mem := inst.vm.Memory.Data()
- // TODO: why is this SCALE encoded? it should just be a 32 byte buffer. may be due to way test runtime is written.
+ // this is SCALE encoded, but it should just be a 32-byte buffer; this may be due to the way the test runtime is written.
pubKeyBytes := mem[ret.ToI32()+1 : ret.ToI32()+1+32]
pubKey, err := ed25519.NewPublicKey(pubKeyBytes)
require.NoError(t, err)
diff --git a/lib/runtime/wasmer/instance.go b/lib/runtime/wasmer/instance.go
index 64720bdba1..f0c1fcfd3d 100644
--- a/lib/runtime/wasmer/instance.go
+++ b/lib/runtime/wasmer/instance.go
@@ -23,7 +23,6 @@ import (
"sync"
"github.com/ChainSafe/gossamer/lib/common"
- "github.com/ChainSafe/gossamer/lib/genesis"
"github.com/ChainSafe/gossamer/lib/keystore"
"github.com/ChainSafe/gossamer/lib/runtime"
"github.com/ChainSafe/gossamer/lib/trie"
@@ -61,13 +60,16 @@ type Instance struct {
}
// NewRuntimeFromGenesis creates a runtime instance from the genesis data
-func NewRuntimeFromGenesis(g *genesis.Genesis, cfg *Config) (runtime.Instance, error) { // TODO: simplify, get :code from storage
- codeStr := g.GenesisFields().Raw["top"][common.BytesToHex(common.CodeKey)]
- if codeStr == "" {
- return nil, fmt.Errorf("cannot find :code in genesis")
+func NewRuntimeFromGenesis(cfg *Config) (runtime.Instance, error) {
+ if cfg.Storage == nil {
+ return nil, errors.New("storage is nil")
+ }
+
+ code := cfg.Storage.LoadCode()
+ if len(code) == 0 {
+ return nil, fmt.Errorf("cannot find :code in state")
}
- code := common.MustHexToBytes(codeStr)
cfg.Imports = ImportsNodeRuntime
return NewInstance(code, cfg)
}
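
After this change the caller only needs a populated runtime.Storage. A hedged sketch of the new call site, mirroring the test updates earlier in this patch; newGossamerRuntime and genState are illustrative names, and building the trie state from genesis is elided:

package example

import (
	"github.com/ChainSafe/gossamer/lib/runtime"
	"github.com/ChainSafe/gossamer/lib/runtime/wasmer"
)

// newGossamerRuntime builds a runtime instance from an already-populated state.
func newGossamerRuntime(genState runtime.Storage) (runtime.Instance, error) {
	cfg := &wasmer.Config{}
	cfg.Storage = genState // nil storage now returns an error
	cfg.LogLvl = 4

	// the wasm blob is read from state via cfg.Storage.LoadCode(); a missing
	// :code key surfaces as "cannot find :code in state"
	return wasmer.NewRuntimeFromGenesis(cfg)
}
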
@@ -96,7 +98,6 @@ func NewInstanceFromFile(fp string, cfg *Config) (*Instance, error) {
// NewInstance instantiates a runtime from raw wasm bytecode
func NewInstance(code []byte, cfg *Config) (*Instance, error) {
- // TODO: verify that v0.8 specific funcs are available
return newInstance(code, cfg)
}
@@ -118,7 +119,7 @@ func newInstance(code []byte, cfg *Config) (*Instance, error) {
}
// Provide importable memory for newer runtimes
- // TODO: determine memory descriptor size that the runtime wants from the wasm.
+ // TODO: determine memory descriptor size that the runtime wants from the wasm. (#1268)
// should be doable w/ wasmer 1.0.0.
memory, err := wasm.NewMemory(23, 0)
if err != nil {
@@ -137,7 +138,7 @@ func newInstance(code []byte, cfg *Config) (*Instance, error) {
}
// TODO: get __heap_base exported value from runtime.
- // wasmer 0.3.x does not support this, but wasmer 1.0.0 does
+ // wasmer 0.3.x does not support this, but wasmer 1.0.0 does (#1268)
heapBase := runtime.DefaultHeapBase
// Assume imported memory is used if runtime does not export any
@@ -220,7 +221,7 @@ func (in *Instance) setupInstanceVM(code []byte) error {
}
// TODO: determine memory descriptor size that the runtime wants from the wasm.
- // should be doable w/ wasmer 1.0.0.
+ // should be doable w/ wasmer 1.0.0. (#1268)
memory, err := wasm.NewMemory(23, 0)
if err != nil {
return err
@@ -243,7 +244,7 @@ func (in *Instance) setupInstanceVM(code []byte) error {
}
// TODO: get __heap_base exported value from runtime.
- // wasmer 0.3.x does not support this, but wasmer 1.0.0 does
+ // wasmer 0.3.x does not support this, but wasmer 1.0.0 does (#1268)
heapBase := runtime.DefaultHeapBase
in.ctx.Allocator = runtime.NewAllocator(in.vm.Memory, heapBase)
@@ -319,7 +320,7 @@ func (in *Instance) exec(function string, data []byte) ([]byte, error) {
return nil, err
}
- offset, length := int64ToPointerAndSize(res.ToI64())
+ offset, length := runtime.Int64ToPointerAndSize(res.ToI64())
return in.load(offset, length), nil
}
@@ -350,13 +351,3 @@ func (in *Instance) Keystore() *keystore.GlobalKeystore {
func (in *Instance) Validator() bool {
return in.ctx.Validator
}
-
-// int64ToPointerAndSize converts an int64 into a int32 pointer and a int32 length
-func int64ToPointerAndSize(in int64) (ptr, length int32) {
- return int32(in), int32(in >> 32)
-}
-
-// pointerAndSizeToInt64 converts int32 pointer and size to a int64
-func pointerAndSizeToInt64(ptr, size int32) int64 {
- return int64(ptr) | (int64(size) << 32)
-}
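
The packing these now-shared helpers perform (pointer in the low 32 bits, length in the high 32 bits) can be sketched without any wasmer dependency:

package main

import "fmt"

func main() {
	ptr, length := int32(8), int32(32)

	// pack a pointer and length into one 64-bit wasm span:
	// low 32 bits = pointer, high 32 bits = length
	span := int64(ptr) | (int64(length) << 32)

	// unpack it again, as runtime.Int64ToPointerAndSize does
	gotPtr, gotLength := int32(span), int32(span>>32)

	fmt.Println(span, gotPtr, gotLength) // 137438953480 8 32
}
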
diff --git a/lib/runtime/wasmer/instance_test.go b/lib/runtime/wasmer/instance_test.go
index 6a4334a851..e7cf4c86ed 100644
--- a/lib/runtime/wasmer/instance_test.go
+++ b/lib/runtime/wasmer/instance_test.go
@@ -40,10 +40,10 @@ func TestConcurrentRuntimeCalls(t *testing.T) {
func TestPointerSize(t *testing.T) {
in := int64(8) + int64(32)<<32
- ptr, length := int64ToPointerAndSize(in)
+ ptr, length := runtime.Int64ToPointerAndSize(in)
require.Equal(t, int32(8), ptr)
require.Equal(t, int32(32), length)
- res := pointerAndSizeToInt64(ptr, length)
+ res := runtime.PointerAndSizeToInt64(ptr, length)
require.Equal(t, in, res)
}
diff --git a/lib/trie/database_test.go b/lib/trie/database_test.go
index ecda191550..a75d39a4af 100644
--- a/lib/trie/database_test.go
+++ b/lib/trie/database_test.go
@@ -20,6 +20,7 @@ import (
"bytes"
"fmt"
"io/ioutil"
+ "os"
"testing"
"github.com/ChainSafe/chaindb"
@@ -28,10 +29,7 @@ import (
)
func newTestDB(t *testing.T) chaindb.Database {
- // TODO: dynamically get os.TMPDIR
- testDatadirPath, _ := ioutil.TempDir("/tmp", "test-datadir-*")
-
- // TODO: don't initialise new DB but pass it in
+ testDatadirPath, _ := ioutil.TempDir(os.TempDir(), "test-datadir-*")
db, err := utils.SetupDatabase(testDatadirPath, true)
require.NoError(t, err)
return chaindb.NewTable(db, "trie")
diff --git a/lib/trie/proof_test.go b/lib/trie/proof_test.go
index 9129d503c2..2340395363 100644
--- a/lib/trie/proof_test.go
+++ b/lib/trie/proof_test.go
@@ -50,6 +50,6 @@ func TestProofGeneration(t *testing.T) {
proof, err := GenerateProof(hash.ToBytes(), [][]byte{[]byte("catapulta"), []byte("catapora")}, memdb)
require.NoError(t, err)
- // TODO: use the verify_proof function to assert the tests
+ // TODO: use the verify_proof function to assert the tests (#1790)
require.Equal(t, 5, len(proof))
}
diff --git a/lib/trie/trie.go b/lib/trie/trie.go
index bd33b73ac1..ed6f8d73f4 100644
--- a/lib/trie/trie.go
+++ b/lib/trie/trie.go
@@ -270,7 +270,6 @@ func (t *Trie) insert(parent node, key []byte, value node) node {
n := t.updateBranch(p, key, value)
if p != nil && n != nil && n.isDirty() {
- // TODO: set all `Copy` nodes as dirty?
p.setDirty(true)
}
return n
diff --git a/lib/utils/utils.go b/lib/utils/utils.go
index 93127e64e4..c4b5947bb6 100644
--- a/lib/utils/utils.go
+++ b/lib/utils/utils.go
@@ -213,8 +213,6 @@ func LoadChainDB(basePath string) (*chaindb.BadgerDB, error) {
DataDir: basePath,
}
- // TODO: Open the db in readonly mode.
-
// Open already existing DB
db, err := chaindb.NewBadgerDB(cfg)
if err != nil {
diff --git a/tests/stress/stress_test.go b/tests/stress/stress_test.go
index bcd4e163e3..3a66367fc0 100644
--- a/tests/stress/stress_test.go
+++ b/tests/stress/stress_test.go
@@ -210,30 +210,6 @@ func TestSync_SingleSyncingNode(t *testing.T) {
}
}
-func TestSync_ManyProducers(t *testing.T) {
- // TODO: this fails with runtime: out of memory
- // this means when each node is connected to 8 other nodes, too much memory is being used.
- t.Skip()
-
- numNodes := 9 // 9 block producers
- utils.SetLogLevel(log.LvlInfo)
- nodes, err := utils.InitializeAndStartNodes(t, numNodes, utils.GenesisDefault, utils.ConfigDefault)
- require.NoError(t, err)
-
- defer func() {
- errList := utils.StopNodes(t, nodes)
- require.Len(t, errList, 0)
- }()
-
- numCmps := 100
- for i := 0; i < numCmps; i++ {
- t.Log("comparing...", i)
- _, err = compareBlocksByNumberWithRetry(t, nodes, strconv.Itoa(i))
- require.NoError(t, err, i)
- time.Sleep(time.Second)
- }
-}
-
func TestSync_Bench(t *testing.T) {
utils.SetLogLevel(log.LvlInfo)
numBlocks := 64
diff --git a/tests/utils/gossamer_utils.go b/tests/utils/gossamer_utils.go
index 72bd2a6eb0..69c130a54c 100644
--- a/tests/utils/gossamer_utils.go
+++ b/tests/utils/gossamer_utils.go
@@ -103,7 +103,6 @@ func InitGossamer(idx int, basePath, genesis, config string) (*Node, error) {
"--force",
)
- //add step for init
logger.Info("initialising gossamer...", "cmd", cmdInit)
stdOutInit, err := cmdInit.CombinedOutput()
if err != nil {
@@ -111,9 +110,7 @@ func InitGossamer(idx int, basePath, genesis, config string) (*Node, error) {
return nil, err
}
- // TODO: get init exit code to see if node was successfully initialised
logger.Info("initialised gossamer!", "node", idx)
-
return &Node{
Idx: idx,
RPCPort: strconv.Itoa(BaseRPCPort + idx),