diff --git a/Cargo.lock b/Cargo.lock index 0e02e40912b..a77997936d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -31,7 +31,8 @@ dependencies = [ "slog-term", "slot_clock", "tempfile", - "tokio 0.2.23", + "tokio 0.3.4", + "tokio-compat-02", "types", "validator_dir", ] @@ -91,23 +92,46 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd2bc6d3f370b5666245ff421e231cba4353df936e26986d2918e61a8fd6aef6" dependencies = [ - "aes-soft", + "aes-soft 0.5.0", "aesni 0.8.0", "block-cipher", ] +[[package]] +name = "aes" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" +dependencies = [ + "aes-soft 0.6.4", + "aesni 0.10.0", + "cipher", +] + [[package]] name = "aes-ctr" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64c3b03608ea1c077228520a167cca2514dc7cd8100a81b30a2b38be985234e5" dependencies = [ - "aes-soft", + "aes-soft 0.5.0", "aesni 0.9.0", - "ctr", + "ctr 0.5.0", "stream-cipher", ] +[[package]] +name = "aes-ctr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7729c3cde54d67063be556aeac75a81330d802f0259500ca40cb52967f975763" +dependencies = [ + "aes-soft 0.6.4", + "aesni 0.10.0", + "cipher", + "ctr 0.6.0", +] + [[package]] name = "aes-gcm" version = "0.7.0" @@ -115,12 +139,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0301c9e9c443494d970a07885e8cf3e587bae8356a1d5abd0999068413f7205f" dependencies = [ "aead", - "aes", + "aes 0.5.0", "block-cipher", "ghash", "subtle 2.3.0", ] +[[package]] +name = "aes-gcm" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5278b5fabbb9bd46e24aa69b2fdea62c99088e0a950a9be40e3e0101298f88da" +dependencies = [ + "aead", + "aes 0.6.0", + "cipher", + "ctr 0.6.0", + "ghash", + "subtle 2.3.0", +] + [[package]] name = "aes-soft" version = "0.5.0" @@ -132,6 +170,16 @@ dependencies = [ "opaque-debug 0.3.0", ] +[[package]] +name = "aes-soft" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" +dependencies = [ + "cipher", + "opaque-debug 0.3.0", +] + [[package]] name = "aesni" version = "0.8.0" @@ -153,6 +201,16 @@ dependencies = [ "stream-cipher", ] +[[package]] +name = "aesni" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" +dependencies = [ + "cipher", + "opaque-debug 0.3.0", +] + [[package]] name = "ahash" version = "0.4.6" @@ -340,7 +398,7 @@ dependencies = [ "async-io", "async-mutex", "blocking", - "crossbeam-utils 0.8.0", + "crossbeam-utils 0.8.1", "futures-channel", "futures-core", "futures-io", @@ -414,7 +472,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf13118df3e3dce4b5ac930641343b91b656e4e72c8f8325838b01a4b1c9d45" dependencies = [ - "http 0.2.1", + "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.11", "url 2.2.0", ] @@ -545,7 +603,7 @@ dependencies = [ "store", "task_executor", "tempfile", - "tokio 0.2.23", + "tokio 0.3.4", "tree_hash", "types", "websocket_server", @@ -583,7 +641,8 @@ dependencies = [ "slog-term", "store", "task_executor", - "tokio 0.2.23", + "tokio 0.3.4", + "tokio-compat-02", "types", ] @@ -798,7 +857,7 
@@ dependencies = [ "slog-stdlog", "slog-term", "sloggers", - "tokio 0.2.23", + "tokio 0.3.4", "types", ] @@ -889,6 +948,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +[[package]] +name = "bytes" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0dcbc35f504eb6fc275a6d20e4ebcda18cf50d40ba6fabff8c711fa16cb3b16" + [[package]] name = "bzip2" version = "0.3.3" @@ -994,6 +1059,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "cipher" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" +dependencies = [ + "generic-array 0.14.4", +] + [[package]] name = "clap" version = "2.33.3" @@ -1057,7 +1131,7 @@ dependencies = [ "task_executor", "time 0.2.23", "timer", - "tokio 0.2.23", + "tokio 0.3.4", "toml", "tree_hash", "types", @@ -1239,7 +1313,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.0", + "crossbeam-utils 0.8.1", ] [[package]] @@ -1260,8 +1334,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.0", - "crossbeam-utils 0.8.0", + "crossbeam-epoch 0.9.1", + "crossbeam-utils 0.8.1", ] [[package]] @@ -1275,21 +1349,21 @@ dependencies = [ "crossbeam-utils 0.7.2", "lazy_static", "maybe-uninit", - "memoffset", + "memoffset 0.5.6", "scopeguard", ] [[package]] name = "crossbeam-epoch" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0f606a85340376eef0d6d8fec399e6d4a544d648386c6645eb6d0653b27d9f" +checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" dependencies = [ "cfg-if 1.0.0", "const_fn", - "crossbeam-utils 0.8.0", + "crossbeam-utils 0.8.1", "lazy_static", - "memoffset", + "memoffset 0.6.1", "scopeguard", ] @@ -1317,13 +1391,12 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec91540d98355f690a86367e566ecad2e9e579f230230eb7c21398372be73ea5" +checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" dependencies = [ "autocfg 1.0.1", "cfg-if 1.0.0", - "const_fn", "lazy_static", ] @@ -1363,6 +1436,16 @@ dependencies = [ "subtle 2.3.0", ] +[[package]] +name = "crypto-mac" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4857fd85a0c34b3c3297875b747c1e02e06b6a0ea32dd892d8192b9ce0813ea6" +dependencies = [ + "generic-array 0.14.4", + "subtle 2.3.0", +] + [[package]] name = "csv" version = "1.1.4" @@ -1394,6 +1477,15 @@ dependencies = [ "stream-cipher", ] +[[package]] +name = "ctr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" +dependencies = [ + "cipher", +] + [[package]] name = "ctrlc" version = "3.1.7" @@ -1616,30 +1708,63 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "discv5" -version = "0.1.0-beta.1" -source = 
"git+https://github.com/sigp/discv5?rev=fba7ceb5cfebd219ebbad6ffdb5d8c31dc8e4bc0#fba7ceb5cfebd219ebbad6ffdb5d8c31dc8e4bc0" +version = "0.1.0-beta.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddad09a9bd70478c581a9f7e608a17962c5561a3ca7b1eb5b75cf1c6c3f124b8" dependencies = [ - "aes-ctr", - "aes-gcm", + "aes-ctr 0.6.0", + "aes-gcm 0.8.0", "arrayvec", "digest 0.9.0", "enr", "fnv", "futures 0.3.8", "hex", - "hkdf", + "hkdf 0.10.0", "k256", "lazy_static", - "libp2p-core 0.22.1", + "libp2p-core 0.23.1", "lru_time_cache", - "multihash", + "multihash 0.11.4", "parking_lot 0.11.1", "rand 0.7.3", "rlp", "sha2 0.9.2", "smallvec 1.5.0", - "socket2", - "tokio 0.2.23", + "tokio 0.3.4", + "tokio-util 0.5.0", + "tracing", + "tracing-subscriber", + "uint", + "zeroize", +] + +[[package]] +name = "discv5" +version = "0.1.0-beta.2" +source = "git+https://github.com/sigp/discv5?rev=f117b3ca56fa3dca2317270434634ff7106d391a#f117b3ca56fa3dca2317270434634ff7106d391a" +dependencies = [ + "aes-ctr 0.6.0", + "aes-gcm 0.8.0", + "arrayvec", + "digest 0.9.0", + "enr", + "fnv", + "futures 0.3.8", + "hex", + "hkdf 0.9.0", + "k256", + "lazy_static", + "libp2p-core 0.23.1", + "lru_time_cache", + "multihash 0.11.4", + "parking_lot 0.11.1", + "rand 0.7.3", + "rlp", + "sha2 0.9.2", + "smallvec 1.5.0", + "tokio 0.3.4", + "tokio-util 0.4.0", "tracing", "tracing-subscriber", "uint", @@ -1811,7 +1936,7 @@ dependencies = [ "slog-term", "sloggers", "task_executor", - "tokio 0.2.23", + "tokio 0.3.4", "types", ] @@ -1849,7 +1974,8 @@ dependencies = [ "sloggers", "state_processing", "task_executor", - "tokio 0.2.23", + "tokio 0.3.4", + "tokio-compat-02", "toml", "tree_hash", "types", @@ -1863,7 +1989,7 @@ dependencies = [ "deposit_contract", "futures 0.3.8", "serde_json", - "tokio 0.2.23", + "tokio 0.3.4", "types", "web3", ] @@ -1944,7 +2070,7 @@ dependencies = [ name = "eth2_keystore" version = "0.1.0" dependencies = [ - "aes-ctr", + "aes-ctr 0.5.0", "bls", "eth2_key_derivation", "eth2_ssz", @@ -1969,7 +2095,7 @@ dependencies = [ "base64 0.13.0", "directory", "dirs 3.0.1", - "discv5", + "discv5 0.1.0-beta.2 (registry+https://github.com/rust-lang/crates.io-index)", "error-chain", "eth2_ssz", "eth2_ssz_derive", @@ -1998,11 +2124,11 @@ dependencies = [ "task_executor", "tempdir", "tiny-keccak 2.0.2", - "tokio 0.2.23", + "tokio 0.3.4", "tokio-io-timeout", - "tokio-util", + "tokio-util 0.4.0", "types", - "unsigned-varint 0.3.3", + "unsigned-varint 0.5.1 (git+https://github.com/sigp/unsigned-varint?branch=dep-update)", "void", ] @@ -2509,7 +2635,8 @@ dependencies = [ "serde_derive", "slog", "state_processing", - "tokio 0.2.23", + "tokio 0.3.4", + "tokio-compat-02", "tree_hash", "types", ] @@ -2621,11 +2748,30 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http 0.2.1", + "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "indexmap", "slab 0.4.2", "tokio 0.2.23", - "tokio-util", + "tokio-util 0.3.1", + "tracing", + "tracing-futures", +] + +[[package]] +name = "h2" +version = "0.3.0" +source = "git+https://github.com/agemanning/h2?branch=lighthouse#a59d6eba170ffaa7d9504b95c98b7321f459581b" +dependencies = [ + "bytes 0.6.0", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.1 (git+https://github.com/agemanning/http?branch=lighthouse)", + "indexmap", + "slab 0.4.2", + "tokio 0.3.4", + "tokio-util 0.5.0", "tracing", "tracing-futures", ] @@ -2659,7 +2805,8 @@ name = "hashset_delay" version = "0.2.0" dependencies = [ "futures 0.3.8", - "tokio 
0.2.23", + "tokio 0.3.4", + "tokio-util 0.4.0", ] [[package]] @@ -2671,8 +2818,23 @@ dependencies = [ "base64 0.12.3", "bitflags 1.2.1", "bytes 0.5.6", - "headers-core", - "http 0.2.1", + "headers-core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16", + "sha-1 0.8.2", + "time 0.1.44", +] + +[[package]] +name = "headers" +version = "0.3.2" +source = "git+https://github.com/blacktemplar/headers?branch=lighthouse#8bffbd8aa2e170745a81e62fc0d7e98c0a23a69a" +dependencies = [ + "base64 0.13.0", + "bitflags 1.2.1", + "bytes 0.6.0", + "headers-core 0.2.0 (git+https://github.com/blacktemplar/headers?branch=lighthouse)", + "http 0.2.1 (git+https://github.com/agemanning/http?branch=lighthouse)", "mime 0.3.16", "sha-1 0.8.2", "time 0.1.44", @@ -2684,7 +2846,15 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http 0.2.1", + "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "headers-core" +version = "0.2.0" +source = "git+https://github.com/blacktemplar/headers?branch=lighthouse#8bffbd8aa2e170745a81e62fc0d7e98c0a23a69a" +dependencies = [ + "http 0.2.1 (git+https://github.com/agemanning/http?branch=lighthouse)", ] [[package]] @@ -2727,6 +2897,16 @@ dependencies = [ "hmac 0.8.1", ] +[[package]] +name = "hkdf" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51ab2f639c231793c5f6114bdb9bbe50a7dbbfcd7c7c6bd8475dec2d991e964f" +dependencies = [ + "digest 0.9.0", + "hmac 0.10.1", +] + [[package]] name = "hmac" version = "0.7.1" @@ -2757,6 +2937,16 @@ dependencies = [ "digest 0.9.0", ] +[[package]] +name = "hmac" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" +dependencies = [ + "crypto-mac 0.10.0", + "digest 0.9.0", +] + [[package]] name = "hmac-drbg" version = "0.2.0" @@ -2790,6 +2980,16 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "0.2.1" +source = "git+https://github.com/agemanning/http?branch=lighthouse#144a8ad6334f40bf4f84e26cf582ff164795024a" +dependencies = [ + "bytes 0.6.0", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.1.0" @@ -2809,7 +3009,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ "bytes 0.5.6", - "http 0.2.1", + "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "http-body" +version = "0.3.1" +source = "git+https://github.com/agemanning/http-body?branch=lighthouse#a10365c24eaee8eab881519accad48b695b88ccf" +dependencies = [ + "bytes 0.6.0", + "http 0.2.1 (git+https://github.com/agemanning/http?branch=lighthouse)", ] [[package]] @@ -2818,7 +3027,7 @@ version = "0.1.0" dependencies = [ "beacon_chain", "bs58 0.3.1", - "discv5", + "discv5 0.1.0-beta.2 (git+https://github.com/sigp/discv5?rev=f117b3ca56fa3dca2317270434634ff7106d391a)", "environment", "eth1", "eth2", @@ -2836,7 +3045,8 @@ dependencies = [ "slot_clock", "state_processing", "store", - "tokio 0.2.23", + "tokio 0.3.4", + "tokio-compat-02", "tree_hash", "types", "warp", @@ -2860,7 +3070,8 @@ dependencies = [ "slog", "slot_clock", "store", - "tokio 0.2.23", + "tokio 0.3.4", + "tokio-compat-02", 
"types", "warp", "warp_utils", @@ -2888,7 +3099,7 @@ dependencies = [ "async-trait", "base64 0.13.0", "basic-cookies", - "crossbeam-utils 0.8.0", + "crossbeam-utils 0.8.1", "difference", "futures-util", "hyper 0.13.9", @@ -2974,8 +3185,8 @@ dependencies = [ "futures-core", "futures-util", "h2 0.2.7", - "http 0.2.1", - "http-body 0.3.1", + "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "httparse", "httpdate", "itoa", @@ -2987,6 +3198,29 @@ dependencies = [ "want 0.3.0", ] +[[package]] +name = "hyper" +version = "0.14.0-dev" +source = "git+https://github.com/sigp/hyper?branch=lighthouse#6e24636c115108711fc2ac0c1cf81d4b3dce2277" +dependencies = [ + "bytes 0.6.0", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.0", + "http 0.2.1 (git+https://github.com/agemanning/http?branch=lighthouse)", + "http-body 0.3.1 (git+https://github.com/agemanning/http-body?branch=lighthouse)", + "httparse", + "httpdate", + "itoa", + "pin-project 1.0.2", + "socket2", + "tokio 0.3.4", + "tower-service", + "tracing", + "want 0.3.0", +] + [[package]] name = "hyper-tls" version = "0.3.2" @@ -3164,13 +3398,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80aafab09693e9fa74b76ef207c55dc1cba5d9d5dc6dcc1b6a96d008a98000e9" dependencies = [ "bytes 0.5.6", - "crossbeam-utils 0.8.0", + "crossbeam-utils 0.8.1", "curl", "curl-sys", "encoding_rs", "flume", "futures-lite", - "http 0.2.1", + "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.11", "mime 0.3.16", "once_cell", @@ -3343,7 +3577,8 @@ dependencies = [ "serde_yaml", "simple_logger", "state_processing", - "tokio 0.2.23", + "tokio 0.3.4", + "tokio-compat-02", "tree_hash", "types", "validator_dir", @@ -3420,14 +3655,14 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.30.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +version = "0.30.1" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "atomic", "bytes 0.5.6", "futures 0.3.8", "lazy_static", - "libp2p-core 0.24.0", + "libp2p-core 0.25.0", "libp2p-core-derive", "libp2p-dns", "libp2p-gossipsub", @@ -3438,8 +3673,7 @@ dependencies = [ "libp2p-tcp", "libp2p-websocket", "libp2p-yamux", - "multihash", - "parity-multiaddr 0.9.3", + "parity-multiaddr 0.9.6 (git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25)", "parking_lot 0.11.1", "pin-project 1.0.2", "smallvec 1.5.0", @@ -3448,9 +3682,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.22.1" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52f13ba8c7df0768af2eb391696d562c7de88cc3a35122531aaa6a7d77754d25" +checksum = "3960524389409633550567e8a9e0684d25a33f4f8408887ff897dd9fdfbdb771" dependencies = [ "asn1_der", "bs58 0.3.1", @@ -3462,31 +3696,32 @@ dependencies = [ "lazy_static", "libsecp256k1", "log 0.4.11", - "multihash", - "multistream-select 0.8.5", - "parity-multiaddr 0.9.6", - "parking_lot 0.10.2", - "pin-project 0.4.27", + "multihash 0.11.4", + "multistream-select 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-multiaddr 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.11.1", + "pin-project 1.0.2", "prost", "prost-build", "rand 0.7.3", 
"ring", "rw-stream-sink", - "sha2 0.8.2", + "sha2 0.9.2", "smallvec 1.5.0", "thiserror", - "unsigned-varint 0.4.0", + "unsigned-varint 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "void", "zeroize", ] [[package]] name = "libp2p-core" -version = "0.24.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +version = "0.25.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "asn1_der", - "bs58 0.3.1", + "bs58 0.4.0", + "bytes 0.5.6", "ed25519-dalek", "either", "fnv", @@ -3495,9 +3730,9 @@ dependencies = [ "lazy_static", "libsecp256k1", "log 0.4.11", - "multihash", - "multistream-select 0.8.4", - "parity-multiaddr 0.9.3", + "multihash 0.13.1", + "multistream-select 0.8.5 (git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25)", + "parity-multiaddr 0.9.6 (git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25)", "parking_lot 0.11.1", "pin-project 1.0.2", "prost", @@ -3508,7 +3743,7 @@ dependencies = [ "sha2 0.9.2", "smallvec 1.5.0", "thiserror", - "unsigned-varint 0.5.1", + "unsigned-varint 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "void", "zeroize", ] @@ -3516,7 +3751,7 @@ dependencies = [ [[package]] name = "libp2p-core-derive" version = "0.20.2" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "quote", "syn", @@ -3524,18 +3759,18 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.24.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +version = "0.25.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "futures 0.3.8", - "libp2p-core 0.24.0", + "libp2p-core 0.25.0", "log 0.4.11", ] [[package]] name = "libp2p-gossipsub" -version = "0.24.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +version = "0.25.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "base64 0.13.0", "byteorder", @@ -3544,7 +3779,7 @@ dependencies = [ "futures 0.3.8", "futures_codec", "hex_fmt", - "libp2p-core 0.24.0", + "libp2p-core 0.25.0", "libp2p-swarm", "log 0.4.11", "prost", @@ -3552,17 +3787,17 @@ dependencies = [ "rand 0.7.3", "sha2 0.9.2", "smallvec 1.5.0", - "unsigned-varint 0.5.1", + "unsigned-varint 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "wasm-timer", ] [[package]] name = "libp2p-identify" -version = "0.24.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +version = "0.25.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "futures 0.3.8", - "libp2p-core 0.24.0", + "libp2p-core 0.25.0", "libp2p-swarm", "log 0.4.11", "prost", @@ -3573,31 +3808,31 @@ dependencies = [ 
[[package]] name = "libp2p-mplex" -version = "0.24.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +version = "0.25.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "bytes 0.5.6", "futures 0.3.8", "futures_codec", - "libp2p-core 0.24.0", + "libp2p-core 0.25.0", "log 0.4.11", "nohash-hasher", "parking_lot 0.11.1", "rand 0.7.3", "smallvec 1.5.0", - "unsigned-varint 0.5.1", + "unsigned-varint 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-noise" -version = "0.26.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +version = "0.27.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "bytes 0.5.6", "curve25519-dalek", "futures 0.3.8", "lazy_static", - "libp2p-core 0.24.0", + "libp2p-core 0.25.0", "log 0.4.11", "prost", "prost-build", @@ -3611,12 +3846,12 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.24.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +version = "0.25.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "either", "futures 0.3.8", - "libp2p-core 0.24.0", + "libp2p-core 0.25.0", "log 0.4.11", "rand 0.7.3", "smallvec 1.5.0", @@ -3626,28 +3861,28 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.24.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +version = "0.25.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "futures 0.3.8", "futures-timer", "if-addrs", "ipnet", - "libp2p-core 0.24.0", + "libp2p-core 0.25.0", "log 0.4.11", "socket2", - "tokio 0.2.23", + "tokio 0.3.4", ] [[package]] name = "libp2p-websocket" -version = "0.25.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +version = "0.26.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "async-tls", "either", "futures 0.3.8", - "libp2p-core 0.24.0", + "libp2p-core 0.25.0", "log 0.4.11", "quicksink", "rustls", @@ -3660,11 +3895,11 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.27.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +version = "0.28.0" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "futures 0.3.8", - "libp2p-core 0.24.0", + "libp2p-core 0.25.0", "parking_lot 0.11.1", "thiserror", "yamux", @@ -3734,7 +3969,8 @@ dependencies = [ "slog-term", "sloggers", "tempfile", - "tokio 0.2.23", + "tokio 0.3.4", + "tokio-compat-02", "types", "validator_client", "validator_dir", @@ -3924,6 +4160,15 @@ dependencies = [ "autocfg 1.0.1", ] +[[package]] +name = 
"memoffset" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87" +dependencies = [ + "autocfg 1.0.1", +] + [[package]] name = "merkle_proof" version = "0.2.0" @@ -4011,6 +4256,19 @@ dependencies = [ "winapi 0.2.8", ] +[[package]] +name = "mio" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f33bc887064ef1fd66020c9adfc45bb9f33d75a42096c81e7c56c65b75dd1a8b" +dependencies = [ + "libc", + "log 0.4.11", + "miow 0.3.6", + "ntapi", + "winapi 0.3.9", +] + [[package]] name = "mio-extras" version = "2.0.6" @@ -4019,7 +4277,7 @@ checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" dependencies = [ "lazycell", "log 0.4.11", - "mio", + "mio 0.6.22", "slab 0.4.2", ] @@ -4030,7 +4288,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" dependencies = [ "log 0.4.11", - "mio", + "mio 0.6.22", "miow 0.3.6", "winapi 0.3.9", ] @@ -4043,7 +4301,7 @@ checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" dependencies = [ "iovec", "libc", - "mio", + "mio 0.6.22", ] [[package]] @@ -4080,7 +4338,34 @@ dependencies = [ "sha-1 0.9.2", "sha2 0.9.2", "sha3", - "unsigned-varint 0.5.1", + "unsigned-varint 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "multihash" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb63389ee5fcd4df3f8727600f4a0c3df53c541f0ed4e8b50a9ae51a80fc1efe" +dependencies = [ + "digest 0.9.0", + "generic-array 0.14.4", + "multihash-derive", + "sha2 0.9.2", + "unsigned-varint 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "multihash-derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f5653449cd45d502a53480ee08d7a599e8f4893d2bacb33c63d65bc20af6c1a" +dependencies = [ + "proc-macro-crate", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", + "synstructure", ] [[package]] @@ -4109,29 +4394,29 @@ dependencies = [ [[package]] name = "multistream-select" -version = "0.8.4" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93faf2e41f9ee62fb01680ed48f3cc26652352327aa2e59869070358f6b7dd75" dependencies = [ "bytes 0.5.6", "futures 0.3.8", "log 0.4.11", "pin-project 1.0.2", "smallvec 1.5.0", - "unsigned-varint 0.5.1", + "unsigned-varint 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "multistream-select" version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93faf2e41f9ee62fb01680ed48f3cc26652352327aa2e59869070358f6b7dd75" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "bytes 0.5.6", "futures 0.3.8", "log 0.4.11", "pin-project 1.0.2", "smallvec 1.5.0", - "unsigned-varint 0.5.1", + "unsigned-varint 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4200,6 +4485,8 @@ dependencies = [ "rand 0.7.3", "rlp", "slog", + "slog-async", + "slog-term", "sloggers", "slot_clock", "smallvec 1.5.0", @@ -4207,7 +4494,7 @@ dependencies = [ 
"store", "task_executor", "tempfile", - "tokio 0.2.23", + "tokio 0.3.4", "tree_hash", "types", ] @@ -4274,6 +4561,15 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "num-bigint" version = "0.3.1" @@ -4438,36 +4734,36 @@ dependencies = [ [[package]] name = "parity-multiaddr" -version = "0.9.3" -source = "git+https://github.com/sigp/rust-libp2p?rev=f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c#f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43244a26dc1ddd3097216bb12eaa6cf8a07b060c72718d9ebd60fd297d6401df" dependencies = [ "arrayref", - "bs58 0.3.1", + "bs58 0.4.0", "byteorder", "data-encoding", - "multihash", + "multihash 0.11.4", "percent-encoding 2.1.0", "serde", "static_assertions", - "unsigned-varint 0.5.1", + "unsigned-varint 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.2.0", ] [[package]] name = "parity-multiaddr" version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43244a26dc1ddd3097216bb12eaa6cf8a07b060c72718d9ebd60fd297d6401df" +source = "git+https://github.com/sigp/rust-libp2p?rev=e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25#e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" dependencies = [ "arrayref", "bs58 0.4.0", "byteorder", "data-encoding", - "multihash", + "multihash 0.13.1", "percent-encoding 2.1.0", "serde", "static_assertions", - "unsigned-varint 0.5.1", + "unsigned-varint 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.2.0", ] @@ -4720,11 +5016,11 @@ dependencies = [ [[package]] name = "polyval" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5884790f1ce3553ad55fec37b5aaac5882e0e845a2612df744d6c85c9bf046c" +checksum = "b3fd900a291ceb8b99799cc8cd3d1d3403a51721e015bc533528b2ceafcc443c" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "universal-hash", ] @@ -4753,6 +5049,15 @@ dependencies = [ "uint", ] +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -5221,7 +5526,7 @@ checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" dependencies = [ "crossbeam-channel 0.5.0", "crossbeam-deque 0.8.0", - "crossbeam-utils 0.8.0", + "crossbeam-utils 0.8.1", "lazy_static", "num_cpus", ] @@ -5319,7 +5624,7 @@ dependencies = [ "clap", "environment", "futures 0.3.8", - "hyper 0.13.9", + "hyper 0.14.0-dev", "lazy_static", "regex", "remote_signer_backend", @@ -5380,8 +5685,8 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "http 0.2.1", - "http-body 0.3.1", + "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "http-body 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.13.9", "hyper-tls 0.4.3", "ipnet", @@ -5395,7 +5700,7 @@ dependencies = [ "pin-project-lite 0.2.0", "serde", "serde_json", - "serde_urlencoded 0.7.0", + "serde_urlencoded", "tokio 0.2.23", "tokio-tls 
0.3.1", "url 2.2.0", @@ -5463,14 +5768,14 @@ dependencies = [ [[package]] name = "rust-argon2" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dab61250775933275e84053ac235621dfb739556d5c54a2f2e9313b7cf43a19" +checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "blake2b_simd", "constant_time_eq", - "crossbeam-utils 0.7.2", + "crossbeam-utils 0.8.1", ] [[package]] @@ -5736,18 +6041,6 @@ dependencies = [ "syn", ] -[[package]] -name = "serde_urlencoded" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" -dependencies = [ - "dtoa", - "itoa", - "serde", - "url 2.2.0", -] - [[package]] name = "serde_urlencoded" version = "0.7.0" @@ -5904,7 +6197,8 @@ dependencies = [ "node_test_rig", "parking_lot 0.11.1", "rayon", - "tokio 0.2.23", + "tokio 0.3.4", + "tokio-compat-02", "types", "validator_client", ] @@ -6119,7 +6413,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "795dd7aeeee24468e5a32661f6d27f7b5cbed802031b2d7640c7b10f8fb2dd50" dependencies = [ - "aes-gcm", + "aes-gcm 0.7.0", "blake2", "chacha20poly1305", "rand 0.7.3", @@ -6367,9 +6661,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.50" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443b4178719c5a851e1bde36ce12da21d74a0e60b4d982ec3385a933c812f0f6" +checksum = "3b4f34193997d92804d359ed09953e25d5138df6bcc055a71bf68ee89fdf9223" dependencies = [ "proc-macro2", "quote", @@ -6415,7 +6709,8 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "slog", - "tokio 0.2.23", + "tokio 0.3.4", + "tokio-compat-02", ] [[package]] @@ -6586,7 +6881,7 @@ dependencies = [ "slog", "slot_clock", "task_executor", - "tokio 0.2.23", + "tokio 0.3.4", "types", ] @@ -6659,7 +6954,7 @@ checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" dependencies = [ "bytes 0.4.12", "futures 0.1.30", - "mio", + "mio 0.6.22", "num_cpus", "tokio-codec", "tokio-current-thread", @@ -6688,14 +6983,36 @@ dependencies = [ "lazy_static", "libc", "memchr", - "mio", + "mio 0.6.22", "mio-named-pipes", "mio-uds", "num_cpus", "pin-project-lite 0.1.11", "signal-hook-registry", "slab 0.4.2", - "tokio-macros", + "tokio-macros 0.2.6", + "winapi 0.3.9", +] + +[[package]] +name = "tokio" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dfe2523e6fa84ddf5e688151d4e5fddc51678de9752c6512a24714c23818d61" +dependencies = [ + "autocfg 1.0.1", + "bytes 0.6.0", + "futures-core", + "lazy_static", + "libc", + "memchr", + "mio 0.7.6", + "num_cpus", + "parking_lot 0.11.1", + "pin-project-lite 0.2.0", + "signal-hook-registry", + "slab 0.4.2", + "tokio-macros 0.3.1", "winapi 0.3.9", ] @@ -6721,6 +7038,19 @@ dependencies = [ "tokio-io", ] +[[package]] +name = "tokio-compat-02" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb4cec419b8b6f06c32e74aae6d8c5e79646d038a38e5ea2b36045f2c3296e22" +dependencies = [ + "bytes 0.5.6", + "once_cell", + "pin-project-lite 0.1.11", + "tokio 0.2.23", + "tokio 0.3.4", +] + [[package]] name = "tokio-core" version = "0.1.17" @@ -6731,7 +7061,7 @@ dependencies = [ "futures 0.1.30", "iovec", "log 0.4.11", - "mio", + "mio 0.6.22", "scoped-tls 0.1.2", "tokio 0.1.22", "tokio-executor", @@ -6784,12 
+7114,11 @@ dependencies = [ [[package]] name = "tokio-io-timeout" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9390a43272c8a6ac912ed1d1e2b6abeafd5047e05530a2fa304deee041a06215" +checksum = "6654a6da4326b0b4228000891d44fbcbdaa1904c6ddfa06617230649073be8fb" dependencies = [ - "bytes 0.5.6", - "tokio 0.2.23", + "tokio 0.3.4", ] [[package]] @@ -6803,6 +7132,17 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-macros" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21d30fdbb5dc2d8f91049691aa1a9d4d4ae422a21c334ce8936e5886d30c5c45" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "tokio-reactor" version = "0.1.12" @@ -6813,7 +7153,7 @@ dependencies = [ "futures 0.1.30", "lazy_static", "log 0.4.11", - "mio", + "mio 0.6.22", "num_cpus", "parking_lot 0.9.0", "slab 0.4.2", @@ -6841,7 +7181,7 @@ dependencies = [ "bytes 0.4.12", "futures 0.1.30", "iovec", - "mio", + "mio 0.6.22", "tokio-io", "tokio-reactor", ] @@ -6908,14 +7248,14 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d9e878ad426ca286e4dcae09cbd4e1973a7f8987d97570e2469703dd7f5720c" +checksum = "0381c1e6e08908317cee104781ca48afe03f37cc857792b85f01f9828fb55ba3" dependencies = [ "futures-util", "log 0.4.11", - "pin-project 0.4.27", - "tokio 0.2.23", + "pin-project 1.0.2", + "tokio 0.3.4", "tungstenite", ] @@ -6928,7 +7268,7 @@ dependencies = [ "bytes 0.4.12", "futures 0.1.30", "log 0.4.11", - "mio", + "mio 0.6.22", "tokio-codec", "tokio-io", "tokio-reactor", @@ -6945,7 +7285,7 @@ dependencies = [ "iovec", "libc", "log 0.3.9", - "mio", + "mio 0.6.22", "mio-uds", "tokio-core", "tokio-io", @@ -6962,7 +7302,7 @@ dependencies = [ "iovec", "libc", "log 0.4.11", - "mio", + "mio 0.6.22", "mio-uds", "tokio-codec", "tokio-io", @@ -6977,13 +7317,43 @@ checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ "bytes 0.5.6", "futures-core", - "futures-io", "futures-sink", "log 0.4.11", "pin-project-lite 0.1.11", "tokio 0.2.23", ] +[[package]] +name = "tokio-util" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24793699f4665ba0416ed287dc794fe6b11a4aa5e4e95b58624f45f6c46b97d4" +dependencies = [ + "bytes 0.5.6", + "futures-core", + "futures-io", + "futures-sink", + "log 0.4.11", + "pin-project-lite 0.1.11", + "slab 0.4.2", + "tokio 0.3.4", +] + +[[package]] +name = "tokio-util" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73af76301319bcacf00d26d3c75534ef248dcad7ceaf36d93ec902453c3b1706" +dependencies = [ + "bytes 0.6.0", + "futures-core", + "futures-sink", + "log 0.4.11", + "pin-project-lite 0.1.11", + "slab 0.4.2", + "tokio 0.3.4", +] + [[package]] name = "toml" version = "0.5.7" @@ -7147,7 +7517,7 @@ dependencies = [ "base64 0.12.3", "byteorder", "bytes 0.5.6", - "http 0.2.1", + "http 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "httparse", "input_buffer", "log 0.4.11", @@ -7276,9 +7646,9 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8716a166f290ff49dabc18b44aa407cb7c6dbe1aa0971b44b8a24b0ca35aae" +checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" [[package]] 
name = "unicode-width" @@ -7304,27 +7674,21 @@ dependencies = [ [[package]] name = "unsigned-varint" -version = "0.3.3" -source = "git+https://github.com/sigp/unsigned-varint?branch=latest-codecs#76fc423494e59f1ec4c8948bd0d3ae3c09851909" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" dependencies = [ "bytes 0.5.6", - "tokio-util", + "futures_codec", ] -[[package]] -name = "unsigned-varint" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "669d776983b692a906c881fcd0cfb34271a48e197e4d6cb8df32b05bfc3d3fa5" - [[package]] name = "unsigned-varint" version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" +source = "git+https://github.com/sigp/unsigned-varint?branch=dep-update#b9f8b1ec8b2dbb1d5d27db9601be758065a4d8a6" dependencies = [ "bytes 0.5.6", - "futures_codec", + "tokio-util 0.4.0", ] [[package]] @@ -7356,12 +7720,6 @@ dependencies = [ "percent-encoding 2.1.0", ] -[[package]] -name = "urlencoding" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9232eb53352b4442e40d7900465dfc534e8cb2dc8f18656fcb2ac16112b5593" - [[package]] name = "utf-8" version = "0.7.5" @@ -7425,7 +7783,8 @@ dependencies = [ "slot_clock", "tempdir", "tempfile", - "tokio 0.2.23", + "tokio 0.3.4", + "tokio-compat-02", "tree_hash", "types", "validator_dir", @@ -7528,28 +7887,28 @@ dependencies = [ [[package]] name = "warp" version = "0.2.5" -source = "git+https://github.com/paulhauner/warp?branch=cors-wildcard#a7685b76d70c3e5628e31d60aee510acec3c5c30" +source = "git+https://github.com/sigp/warp?branch=lighthouse#21b36a987ecc1b3c50c1314578ded64d12f742c6" dependencies = [ - "bytes 0.5.6", + "bytes 0.6.0", "futures 0.3.8", - "headers", - "http 0.2.1", - "hyper 0.13.9", + "headers 0.3.2 (git+https://github.com/blacktemplar/headers?branch=lighthouse)", + "http 0.2.1 (git+https://github.com/agemanning/http?branch=lighthouse)", + "hyper 0.14.0-dev", "log 0.4.11", "mime 0.3.16", "mime_guess", "multipart", - "pin-project 0.4.27", + "percent-encoding 2.1.0", + "pin-project 1.0.2", "scoped-tls 1.0.0", "serde", "serde_json", - "serde_urlencoded 0.6.1", - "tokio 0.2.23", + "serde_urlencoded", + "tokio 0.3.4", "tokio-tungstenite", "tower-service", "tracing", "tracing-futures", - "urlencoding", ] [[package]] @@ -7558,13 +7917,13 @@ version = "0.1.0" dependencies = [ "beacon_chain", "eth2", - "headers", + "headers 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static", "lighthouse_metrics", "safe_arith", "serde", "state_processing", - "tokio 0.2.23", + "tokio 0.3.4", "types", "warp", ] @@ -7781,7 +8140,7 @@ dependencies = [ "serde_derive", "slog", "task_executor", - "tokio 0.2.23", + "tokio 0.3.4", "types", "ws", ] @@ -7866,7 +8225,7 @@ dependencies = [ "bytes 0.4.12", "httparse", "log 0.4.11", - "mio", + "mio 0.6.22", "mio-extras", "rand 0.7.3", "sha-1 0.8.2", diff --git a/Makefile b/Makefile index bd020bfac15..108a41b6cae 100644 --- a/Makefile +++ b/Makefile @@ -140,7 +140,7 @@ audit: # # Tracking issue: # https://github.com/sigp/lighthouse/issues/1669 - cargo audit --ignore RUSTSEC-2020-0043 + cargo audit --ignore RUSTSEC-2020-0043 --ignore RUSTSEC-2016-0002 --ignore RUSTSEC-2020-0008 --ignore RUSTSEC-2017-0002 # Runs `cargo udeps` to check for unused dependencies udeps: diff --git 
a/account_manager/Cargo.toml b/account_manager/Cargo.toml index dc901f0d7f8..6294c0c0c96 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -20,20 +20,21 @@ eth2_ssz_derive = "0.1.0" hex = "0.4.2" rayon = "1.4.1" eth2_testnet_config = { path = "../common/eth2_testnet_config" } -futures = { version = "0.3.5", features = ["compat"] } +futures = { version = "0.3.7", features = ["compat"] } clap_utils = { path = "../common/clap_utils" } directory = { path = "../common/directory" } eth2_wallet = { path = "../crypto/eth2_wallet" } eth2_wallet_manager = { path = "../common/eth2_wallet_manager" } rand = "0.7.3" validator_dir = { path = "../common/validator_dir" } -tokio = { version = "0.2.22", features = ["full"] } +tokio = { version = "0.3.2", features = ["full"] } eth2_keystore = { path = "../crypto/eth2_keystore" } account_utils = { path = "../common/account_utils" } slashing_protection = { path = "../validator_client/slashing_protection" } eth2 = {path = "../common/eth2"} safe_arith = {path = "../consensus/safe_arith"} slot_clock = { path = "../common/slot_clock" } +tokio-compat-02 = "0.1" [dev-dependencies] tempfile = "3.1.0" diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 492081afb57..65c62786d1c 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -12,6 +12,7 @@ use safe_arith::SafeArith; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::path::PathBuf; use std::time::Duration; +use tokio_compat_02::FutureExt; use types::{ChainSpec, Epoch, EthSpec, Fork, VoluntaryExit}; pub const CMD: &str = "exit"; @@ -58,7 +59,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) } -pub fn cli_run(matches: &ArgMatches, mut env: Environment) -> Result<(), String> { +pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result<(), String> { let keystore_path: PathBuf = clap_utils::parse_required(matches, KEYSTORE_FLAG)?; let password_file_path: Option = clap_utils::parse_optional(matches, PASSWORD_FILE_FLAG)?; @@ -76,14 +77,17 @@ pub fn cli_run(matches: &ArgMatches, mut env: Environment) -> Res .clone() .expect("network should have a valid config"); - env.runtime().block_on(publish_voluntary_exit::( - &keystore_path, - password_file_path.as_ref(), - &client, - &spec, - stdin_inputs, - &testnet_config, - ))?; + env.runtime().block_on( + publish_voluntary_exit::( + &keystore_path, + password_file_path.as_ref(), + &client, + &spec, + stdin_inputs, + &testnet_config, + ) + .compat(), + )?; Ok(()) } @@ -155,7 +159,7 @@ async fn publish_voluntary_exit( .post_beacon_pool_voluntary_exits(&signed_voluntary_exit) .await .map_err(|e| format!("Failed to publish voluntary exit: {}", e))?; - tokio::time::delay_for(std::time::Duration::from_secs(1)).await; // Provides nicer UX. + tokio::time::sleep(std::time::Duration::from_secs(1)).await; // Provides nicer UX. eprintln!( "Successfully validated and published voluntary exit for validator {}", keypair.pk diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index f5633d10d66..21806339526 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -10,6 +10,7 @@ path = "src/lib.rs" [dev-dependencies] node_test_rig = { path = "../testing/node_test_rig" } +tokio-compat-02 = "0.1" [features] write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing. 
@@ -26,12 +27,12 @@ slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_tr slog-term = "2.6.0" slog-async = "2.5.0" ctrlc = { version = "3.1.6", features = ["termination"] } -tokio = { version = "0.2.22", features = ["time"] } +tokio = { version = "0.3.2", features = ["time"] } exit-future = "0.2.0" dirs = "3.0.1" logging = { path = "../common/logging" } directory = {path = "../common/directory"} -futures = "0.3.5" +futures = "0.3.7" environment = { path = "../lighthouse/environment" } task_executor = { path = "../common/task_executor" } genesis = { path = "genesis" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index e9339e2a725..dd3c9381271 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -40,10 +40,10 @@ eth2_ssz_derive = "0.1.0" state_processing = { path = "../../consensus/state_processing" } tree_hash = "0.1.1" types = { path = "../../consensus/types" } -tokio = "0.2.22" +tokio = "0.3.2" eth1 = { path = "../eth1" } websocket_server = { path = "../websocket_server" } -futures = "0.3.5" +futures = "0.3.7" genesis = { path = "../genesis" } integer-sqrt = "0.1.5" rand = "0.7.3" diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index b3dca216e83..d6fddf188d5 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -413,7 +413,7 @@ impl VerifiedAggregatedAttestation { // Ensure there has been no other observed aggregate for the given `aggregator_index`. // - // Note: do not observe yet, only observe once the attestation has been verfied. + // Note: do not observe yet, only observe once the attestation has been verified. 
match chain .observed_aggregators .read() diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index e9a3469e54e..443ff28de81 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -27,9 +27,9 @@ error-chain = "0.12.4" serde_yaml = "0.8.13" slog = { version = "2.5.2", features = ["max_level_trace"] } slog-async = "2.5.0" -tokio = "0.2.22" +tokio = "0.3.2" dirs = "3.0.1" -futures = "0.3.5" +futures = "0.3.7" reqwest = { version = "0.10.8", features = ["native-tls-vendored"] } url = "2.1.1" eth1 = { path = "../eth1" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 723b099f647..14c374bf24a 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -254,10 +254,16 @@ where let (listen_addr, server) = http_api::serve(ctx, exit_future) .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; + let log_clone = context.log().clone(); + let http_api_task = async move { + server.await; + debug!(log_clone, "HTTP API server task ended"); + }; + context .clone() .executor - .spawn_without_exit(async move { server.await }, "http-api"); + .spawn_without_exit(http_api_task, "http-api"); Some(listen_addr) } else { @@ -283,7 +289,7 @@ where "Waiting for HTTP server port to open"; "port" => http_listen ); - tokio::time::delay_for(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(1)).await; } } @@ -442,10 +448,16 @@ where let (listen_addr, server) = http_api::serve(ctx, exit) .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; + let http_log = runtime_context.log().clone(); + let http_api_task = async move { + server.await; + debug!(http_log, "HTTP API server task ended"); + }; + runtime_context .clone() .executor - .spawn_without_exit(async move { server.await }, "http-api"); + .spawn_without_exit(http_api_task, "http-api"); Some(listen_addr) } else { diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index f416a8b5ffa..aa300310cf7 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -7,7 +7,7 @@ use slog::{debug, error, info, warn}; use slot_clock::SlotClock; use std::sync::Arc; use std::time::{Duration, Instant}; -use tokio::time::delay_for; +use tokio::time::sleep; use types::{EthSpec, Slot}; /// Create a warning log whenever the peer count is at or below this value. 
@@ -56,7 +56,7 @@ pub fn spawn_notifier( "peers" => peer_count_pretty(network.connected_peers()), "wait_time" => estimated_time_pretty(Some(next_slot.as_secs() as f64)), ); - delay_for(slot_duration).await; + sleep(slot_duration).await; } _ => break, } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index bb7d016705f..9b10f278f5f 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -10,10 +10,11 @@ toml = "0.5.6" web3 = "0.11.0" sloggers = "1.0.1" environment = { path = "../../lighthouse/environment" } +tokio-compat-02 = "0.1" [dependencies] reqwest = { version = "0.10.8", features = ["native-tls-vendored"] } -futures = { version = "0.3.5", features = ["compat"] } +futures = { version = "0.3.7", features = ["compat"] } serde_json = "1.0.58" serde = { version = "1.0.116", features = ["derive"] } hex = "0.4.2" @@ -25,7 +26,7 @@ tree_hash = "0.1.1" eth2_hashing = "0.1.0" parking_lot = "0.11.0" slog = "2.5.2" -tokio = { version = "0.2.22", features = ["full"] } +tokio = { version = "0.3.2", features = ["full"] } state_processing = { path = "../../consensus/state_processing" } libflate = "1.0.2" lighthouse_metrics = { path = "../../common/lighthouse_metrics"} diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 399890213ff..cd3004c7964 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -10,6 +10,7 @@ use slog::Logger; use sloggers::{null::NullLoggerBuilder, Build}; use std::ops::Range; use std::time::Duration; +use tokio_compat_02::FutureExt; use tree_hash::TreeHash; use types::{DepositData, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, Signature}; use web3::{transports::Http, Web3}; @@ -25,7 +26,7 @@ pub fn new_env() -> Environment { EnvironmentBuilder::minimal() // Use a single thread, so that when all tests are run in parallel they don't have so many // threads. - .single_thread_tokio_runtime() + .multi_threaded_tokio_runtime() .expect("should start tokio runtime") .null_logger() .expect("should start null logger") @@ -103,214 +104,230 @@ mod eth1_cache { #[tokio::test] async fn simple_scenario() { - let log = null_logger(); + async { + let log = null_logger(); + + for follow_distance in 0..2 { + let eth1 = GanacheEth1Instance::new() + .await + .expect("should start eth1 environment"); + let deposit_contract = &eth1.deposit_contract; + let web3 = eth1.web3(); + + let initial_block_number = get_block_number(&web3).await; + + let service = Service::new( + Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + lowest_cached_block_number: initial_block_number, + follow_distance, + ..Config::default() + }, + log.clone(), + MainnetEthSpec::default_spec(), + ); + + // Create some blocks and then consume them, performing the test `rounds` times.
+ for round in 0..2 { + let blocks = 4; + + let initial = if round == 0 { + initial_block_number + } else { + service + .blocks() + .read() + .highest_block_number() + .map(|n| n + follow_distance) + .expect("should have a latest block after the first round") + }; + + for _ in 0..blocks { + eth1.ganache.evm_mine().await.expect("should mine block"); + } + + service + .update_deposit_cache(None) + .await + .expect("should update deposit cache"); + service + .update_block_cache(None) + .await + .expect("should update block cache"); + + service + .update_block_cache(None) + .await + .expect("should update cache when nothing has changed"); + + assert_eq!( + service + .blocks() + .read() + .highest_block_number() + .map(|n| n + follow_distance), + Some(initial + blocks), + "should update {} blocks in round {} (follow {})", + blocks, + round, + follow_distance, + ); + } + } + } + .compat() + .await + } + + /// Tests the case where we attempt to download more blocks than will fit in the cache. + + #[tokio::test] + async fn big_skip() { + async { + let log = null_logger(); - for follow_distance in 0..2 { let eth1 = GanacheEth1Instance::new() .await .expect("should start eth1 environment"); let deposit_contract = &eth1.deposit_contract; let web3 = eth1.web3(); - let initial_block_number = get_block_number(&web3).await; + let cache_len = 4; let service = Service::new( Config { endpoint: eth1.endpoint(), deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance, + lowest_cached_block_number: get_block_number(&web3).await, + follow_distance: 0, + block_cache_truncation: Some(cache_len), ..Config::default() }, - log.clone(), + log, MainnetEthSpec::default_spec(), ); - // Create some blocks and then consume them, performing the test `rounds` times. - for round in 0..2 { - let blocks = 4; - - let initial = if round == 0 { - initial_block_number - } else { - service - .blocks() - .read() - .highest_block_number() - .map(|n| n + follow_distance) - .expect("should have a latest block after the first round") - }; - - for _ in 0..blocks { - eth1.ganache.evm_mine().await.expect("should mine block"); - } - - service - .update_deposit_cache(None) - .await - .expect("should update deposit cache"); - service - .update_block_cache(None) - .await - .expect("should update block cache"); + let blocks = cache_len * 2; - service - .update_block_cache(None) - .await - .expect("should update cache when nothing has changed"); - - assert_eq!( - service - .blocks() - .read() - .highest_block_number() - .map(|n| n + follow_distance), - Some(initial + blocks), - "should update {} blocks in round {} (follow {})", - blocks, - round, - follow_distance, - ); + for _ in 0..blocks { + eth1.ganache.evm_mine().await.expect("should mine block") } - } - } - - /// Tests the case where we attempt to download more blocks than will fit in the cache.
- - #[tokio::test] - async fn big_skip() { - let log = null_logger(); - - let eth1 = GanacheEth1Instance::new() - .await - .expect("should start eth1 environment"); - let deposit_contract = &eth1.deposit_contract; - let web3 = eth1.web3(); - - let cache_len = 4; - - let service = Service::new( - Config { - endpoint: eth1.endpoint(), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&web3).await, - follow_distance: 0, - block_cache_truncation: Some(cache_len), - ..Config::default() - }, - log, - MainnetEthSpec::default_spec(), - ); - let blocks = cache_len * 2; + service + .update_deposit_cache(None) + .await + .expect("should update deposit cache"); + service + .update_block_cache(None) + .await + .expect("should update block cache"); - for _ in 0..blocks { - eth1.ganache.evm_mine().await.expect("should mine block") + assert_eq!( + service.block_cache_len(), + cache_len, + "should not grow cache beyond target" + ); } - - service - .update_deposit_cache(None) - .await - .expect("should update deposit cache"); - service - .update_block_cache(None) - .await - .expect("should update block cache"); - - assert_eq!( - service.block_cache_len(), - cache_len, - "should not grow cache beyond target" - ); + .compat() + .await; } /// Tests to ensure that the cache gets pruned when doing multiple downloads smaller than the /// cache size. #[tokio::test] async fn pruning() { - let log = null_logger(); + async { + let log = null_logger(); - let eth1 = GanacheEth1Instance::new() - .await - .expect("should start eth1 environment"); - let deposit_contract = &eth1.deposit_contract; - let web3 = eth1.web3(); + let eth1 = GanacheEth1Instance::new() + .await + .expect("should start eth1 environment"); + let deposit_contract = &eth1.deposit_contract; + let web3 = eth1.web3(); - let cache_len = 4; + let cache_len = 4; - let service = Service::new( - Config { - endpoint: eth1.endpoint(), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&web3).await, - follow_distance: 0, - block_cache_truncation: Some(cache_len), - ..Config::default() - }, - log, - MainnetEthSpec::default_spec(), - ); + let service = Service::new( + Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + lowest_cached_block_number: get_block_number(&web3).await, + follow_distance: 0, + block_cache_truncation: Some(cache_len), + ..Config::default() + }, + log, + MainnetEthSpec::default_spec(), + ); - for _ in 0..4u8 { - for _ in 0..cache_len / 2 { - eth1.ganache.evm_mine().await.expect("should mine block") + for _ in 0..4u8 { + for _ in 0..cache_len / 2 { + eth1.ganache.evm_mine().await.expect("should mine block") + } + service + .update_deposit_cache(None) + .await + .expect("should update deposit cache"); + service + .update_block_cache(None) + .await + .expect("should update block cache"); } - service - .update_deposit_cache(None) - .await - .expect("should update deposit cache"); - service - .update_block_cache(None) - .await - .expect("should update block cache"); - } - assert_eq!( - service.block_cache_len(), - cache_len, - "should not grow cache beyond target" - ); + assert_eq!( + service.block_cache_len(), + cache_len, + "should not grow cache beyond target" + ); + } + .compat() + .await; } #[tokio::test] async fn double_update() { - let log = null_logger(); + async { + let log = null_logger(); - let n = 16; + let n = 16; - let eth1 = GanacheEth1Instance::new() - .await - .expect("should start eth1
environment"); - let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let eth1 = GanacheEth1Instance::new() + .await + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); - let service = Service::new( - Config { - endpoint: eth1.endpoint(), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&web3).await, - follow_distance: 0, - ..Config::default() - }, - log, - MainnetEthSpec::default_spec(), - ); + let service = Service::new( + Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + lowest_cached_block_number: get_block_number(&web3).await, + follow_distance: 0, + ..Config::default() + }, + log, + MainnetEthSpec::default_spec(), + ); - for _ in 0..n { - eth1.ganache.evm_mine().await.expect("should mine block") + for _ in 0..n { + eth1.ganache.evm_mine().await.expect("should mine block") + } + futures::try_join!( + service.update_deposit_cache(None), + service.update_deposit_cache(None) + ) + .expect("should perform two simultaneous updates of deposit cache"); + futures::try_join!( + service.update_block_cache(None), + service.update_block_cache(None) + ) + .expect("should perform two simultaneous updates of block cache"); + + assert!(service.block_cache_len() >= n, "should grow the cache"); } - futures::try_join!( - service.update_deposit_cache(None), - service.update_deposit_cache(None) - ) - .expect("should perform two simultaneous updates of deposit cache"); - futures::try_join!( - service.update_block_cache(None), - service.update_block_cache(None) - ) - .expect("should perform two simultaneous updates of block cache"); - - assert!(service.block_cache_len() >= n, "should grow the cache"); + .compat() + .await; } } @@ -319,219 +336,231 @@ mod deposit_tree { #[tokio::test] async fn updating() { - let log = null_logger(); + async { + let log = null_logger(); - let n = 4; + let n = 4; - let eth1 = GanacheEth1Instance::new() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let eth1 = GanacheEth1Instance::new() + .await + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); - let start_block = get_block_number(&web3).await; + let start_block = get_block_number(&web3).await; - let service = Service::new( - Config { - endpoint: eth1.endpoint(), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: start_block, - follow_distance: 0, - ..Config::default() - }, - log, - MainnetEthSpec::default_spec(), - ); + let service = Service::new( + Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + deposit_contract_deploy_block: start_block, + follow_distance: 0, + ..Config::default() + }, + log, + MainnetEthSpec::default_spec(), + ); - for round in 0..3 { - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); + for round in 0..3 { + let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - } + for deposit in &deposits { + deposit_contract + .deposit(deposit.clone()) + .await + .expect("should perform a deposit"); + } - service - .update_deposit_cache(None) - .await - .expect("should perform update"); + service + .update_deposit_cache(None) + .await + 
.expect("should perform update"); - service - .update_deposit_cache(None) - .await - .expect("should perform update when nothing has changed"); + service + .update_deposit_cache(None) + .await + .expect("should perform update when nothing has changed"); - let first = n * round; - let last = n * (round + 1); + let first = n * round; + let last = n * (round + 1); - let (_root, local_deposits) = service - .deposits() - .read() - .cache - .get_deposits(first, last, last, 32) - .unwrap_or_else(|_| panic!("should get deposits in round {}", round)); + let (_root, local_deposits) = service + .deposits() + .read() + .cache + .get_deposits(first, last, last, 32) + .unwrap_or_else(|_| panic!("should get deposits in round {}", round)); - assert_eq!( - local_deposits.len(), - n as usize, - "should get the right number of deposits in round {}", - round - ); + assert_eq!( + local_deposits.len(), + n as usize, + "should get the right number of deposits in round {}", + round + ); - assert_eq!( - local_deposits - .iter() - .map(|d| d.data.clone()) - .collect::>(), - deposits.to_vec(), - "obtained deposits should match those submitted in round {}", - round - ); + assert_eq!( + local_deposits + .iter() + .map(|d| d.data.clone()) + .collect::>(), + deposits.to_vec(), + "obtained deposits should match those submitted in round {}", + round + ); + } } + .compat() + .await; } #[tokio::test] async fn double_update() { - let log = null_logger(); + async { + let log = null_logger(); - let n = 8; + let n = 8; - let eth1 = GanacheEth1Instance::new() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let eth1 = GanacheEth1Instance::new() + .await + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); - let start_block = get_block_number(&web3).await; + let start_block = get_block_number(&web3).await; - let service = Service::new( - Config { - endpoint: eth1.endpoint(), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: start_block, - lowest_cached_block_number: start_block, - follow_distance: 0, - ..Config::default() - }, - log, - MainnetEthSpec::default_spec(), - ); + let service = Service::new( + Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + deposit_contract_deploy_block: start_block, + lowest_cached_block_number: start_block, + follow_distance: 0, + ..Config::default() + }, + log, + MainnetEthSpec::default_spec(), + ); - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); + let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - } + for deposit in &deposits { + deposit_contract + .deposit(deposit.clone()) + .await + .expect("should perform a deposit"); + } - futures::try_join!( - service.update_deposit_cache(None), - service.update_deposit_cache(None) - ) - .expect("should perform two updates concurrently"); + futures::try_join!( + service.update_deposit_cache(None), + service.update_deposit_cache(None) + ) + .expect("should perform two updates concurrently"); - assert_eq!(service.deposit_cache_len(), n); + assert_eq!(service.deposit_cache_len(), n); + } + .compat() + .await; } #[tokio::test] async fn cache_consistency() { - let n = 8; + async { + let n = 8; - let spec = &MainnetEthSpec::default_spec(); + let spec = 
&MainnetEthSpec::default_spec(); - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); + let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - let eth1 = GanacheEth1Instance::new() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let eth1 = GanacheEth1Instance::new() + .await + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); - let mut deposit_roots = vec![]; - let mut deposit_counts = vec![]; + let mut deposit_roots = vec![]; + let mut deposit_counts = vec![]; - // Perform deposits to the smart contract, recording it's state along the way. - for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - let block_number = get_block_number(&web3).await; - deposit_roots.push( - blocking_deposit_root(ð1, block_number) - .await - .expect("should get root if contract exists"), - ); - deposit_counts.push( - blocking_deposit_count(ð1, block_number) + // Perform deposits to the smart contract, recording it's state along the way. + for deposit in &deposits { + deposit_contract + .deposit(deposit.clone()) .await - .expect("should get count if contract exists"), - ); - } + .expect("should perform a deposit"); + let block_number = get_block_number(&web3).await; + deposit_roots.push( + blocking_deposit_root(ð1, block_number) + .await + .expect("should get root if contract exists"), + ); + deposit_counts.push( + blocking_deposit_count(ð1, block_number) + .await + .expect("should get count if contract exists"), + ); + } - let mut tree = DepositCache::default(); - - // Pull all the deposit logs from the contract. - let block_number = get_block_number(&web3).await; - let logs: Vec<_> = blocking_deposit_logs(ð1, 0..block_number) - .await - .iter() - .map(|raw| raw.to_deposit_log(spec).expect("should parse deposit log")) - .inspect(|log| { - tree.insert_log(log.clone()) - .expect("should add consecutive logs") - }) - .collect(); - - // Check the logs for invariants. - for i in 0..logs.len() { - let log = &logs[i]; - assert_eq!( - log.deposit_data, deposits[i], - "log {} should have correct deposit data", - i - ); - assert_eq!(log.index, i as u64, "log {} should have correct index", i); - } + let mut tree = DepositCache::default(); - // For each deposit test some more invariants - for i in 0..n { - // Ensure the deposit count from the smart contract was as expected. - assert_eq!( - deposit_counts[i], - i as u64 + 1, - "deposit count should be accurate" - ); + // Pull all the deposit logs from the contract. + let block_number = get_block_number(&web3).await; + let logs: Vec<_> = blocking_deposit_logs(ð1, 0..block_number) + .await + .iter() + .map(|raw| raw.to_deposit_log(spec).expect("should parse deposit log")) + .inspect(|log| { + tree.insert_log(log.clone()) + .expect("should add consecutive logs") + }) + .collect(); + + // Check the logs for invariants. + for i in 0..logs.len() { + let log = &logs[i]; + assert_eq!( + log.deposit_data, deposits[i], + "log {} should have correct deposit data", + i + ); + assert_eq!(log.index, i as u64, "log {} should have correct index", i); + } - // Ensure that the root from the deposit tree matches what the contract reported. 
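// A minimal sketch of the `futures::try_join!` pattern exercised by the
// `double_update` tests in this file: both futures are polled concurrently and the
// first error, if any, is returned. The functions below are hypothetical stand-ins
// for the two simultaneous cache updates.
use futures::try_join;

async fn update_a() -> Result<u64, String> {
    Ok(1)
}

async fn update_b() -> Result<u64, String> {
    Ok(2)
}

async fn concurrent_updates() -> Result<(), String> {
    // Succeeds only if both updates succeed, mirroring the "two simultaneous
    // updates" assertions in the tests.
    let (a, b) = try_join!(update_a(), update_b())?;
    assert_eq!(a + b, 3);
    Ok(())
}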
- let (root, deposits) = tree - .get_deposits(0, i as u64, deposit_counts[i], DEPOSIT_CONTRACT_TREE_DEPTH) - .expect("should get deposits"); - assert_eq!( - root, deposit_roots[i], - "tree deposit root {} should match the contract", - i - ); + // For each deposit test some more invariants + for i in 0..n { + // Ensure the deposit count from the smart contract was as expected. + assert_eq!( + deposit_counts[i], + i as u64 + 1, + "deposit count should be accurate" + ); - // Ensure that the deposits all prove into the root from the smart contract. - let deposit_root = deposit_roots[i]; - for (j, deposit) in deposits.iter().enumerate() { - assert!( - verify_merkle_proof( - deposit.data.tree_hash_root(), - &deposit.proof, - DEPOSIT_CONTRACT_TREE_DEPTH + 1, - j, - deposit_root - ), - "deposit merkle proof should prove into deposit contract root" - ) + // Ensure that the root from the deposit tree matches what the contract reported. + let (root, deposits) = tree + .get_deposits(0, i as u64, deposit_counts[i], DEPOSIT_CONTRACT_TREE_DEPTH) + .expect("should get deposits"); + assert_eq!( + root, deposit_roots[i], + "tree deposit root {} should match the contract", + i + ); + + // Ensure that the deposits all prove into the root from the smart contract. + let deposit_root = deposit_roots[i]; + for (j, deposit) in deposits.iter().enumerate() { + assert!( + verify_merkle_proof( + deposit.data.tree_hash_root(), + &deposit.proof, + DEPOSIT_CONTRACT_TREE_DEPTH + 1, + j, + deposit_root + ), + "deposit merkle proof should prove into deposit contract root" + ) + } } } + .compat() + .await; } } @@ -552,86 +581,90 @@ mod http { #[tokio::test] async fn incrementing_deposits() { - let eth1 = GanacheEth1Instance::new() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); - - let block_number = get_block_number(&web3).await; - let logs = blocking_deposit_logs(ð1, 0..block_number).await; - assert_eq!(logs.len(), 0); - - let mut old_root = blocking_deposit_root(ð1, block_number).await; - let mut old_block = get_block(ð1, block_number).await; - let mut old_block_number = block_number; - - assert_eq!( - blocking_deposit_count(ð1, block_number).await, - Some(0), - "should have deposit count zero" - ); - - for i in 1..=8 { - eth1.ganache - .increase_time(1) - .await - .expect("should be able to increase time on ganache"); - - deposit_contract - .deposit(random_deposit_data()) + async { + let eth1 = GanacheEth1Instance::new() .await - .expect("should perform a deposit"); + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); - // Check the logs. let block_number = get_block_number(&web3).await; let logs = blocking_deposit_logs(ð1, 0..block_number).await; - assert_eq!(logs.len(), i, "the number of logs should be as expected"); + assert_eq!(logs.len(), 0); + + let mut old_root = blocking_deposit_root(ð1, block_number).await; + let mut old_block = get_block(ð1, block_number).await; + let mut old_block_number = block_number; - // Check the deposit count. assert_eq!( blocking_deposit_count(ð1, block_number).await, - Some(i as u64), - "should have a correct deposit count" + Some(0), + "should have deposit count zero" ); - // Check the deposit root. 
- let new_root = blocking_deposit_root(ð1, block_number).await; - assert_ne!( - new_root, old_root, - "deposit root should change with each deposit" - ); - old_root = new_root; + for i in 1..=8 { + eth1.ganache + .increase_time(1) + .await + .expect("should be able to increase time on ganache"); - // Check the block hash. - let new_block = get_block(ð1, block_number).await; - assert_ne!( - new_block.hash, old_block.hash, - "block hash should change with each deposit" - ); + deposit_contract + .deposit(random_deposit_data()) + .await + .expect("should perform a deposit"); - // Check to ensure the timestamp is increasing - assert!( - old_block.timestamp <= new_block.timestamp, - "block timestamp should increase" - ); + // Check the logs. + let block_number = get_block_number(&web3).await; + let logs = blocking_deposit_logs(ð1, 0..block_number).await; + assert_eq!(logs.len(), i, "the number of logs should be as expected"); - old_block = new_block.clone(); + // Check the deposit count. + assert_eq!( + blocking_deposit_count(ð1, block_number).await, + Some(i as u64), + "should have a correct deposit count" + ); - // Check the block number. - assert!( - block_number > old_block_number, - "block number should increase" - ); - old_block_number = block_number; + // Check the deposit root. + let new_root = blocking_deposit_root(ð1, block_number).await; + assert_ne!( + new_root, old_root, + "deposit root should change with each deposit" + ); + old_root = new_root; - // Check to ensure the block root is changing - assert_ne!( - new_root, - Some(new_block.hash), - "the deposit root should be different to the block hash" - ); + // Check the block hash. + let new_block = get_block(ð1, block_number).await; + assert_ne!( + new_block.hash, old_block.hash, + "block hash should change with each deposit" + ); + + // Check to ensure the timestamp is increasing + assert!( + old_block.timestamp <= new_block.timestamp, + "block timestamp should increase" + ); + + old_block = new_block.clone(); + + // Check the block number. + assert!( + block_number > old_block_number, + "block number should increase" + ); + old_block_number = block_number; + + // Check to ensure the block root is changing + assert_ne!( + new_root, + Some(new_block.hash), + "the deposit root should be different to the block hash" + ); + } } + .compat() + .await; } } @@ -642,17 +675,94 @@ mod fast { // with the deposit count and root computed from the deposit cache. 
#[tokio::test] async fn deposit_cache_query() { - let log = null_logger(); + async { + let log = null_logger(); + + let eth1 = GanacheEth1Instance::new() + .await + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); + + let now = get_block_number(&web3).await; + let service = Service::new( + Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + deposit_contract_deploy_block: now, + lowest_cached_block_number: now, + follow_distance: 0, + block_cache_truncation: None, + ..Config::default() + }, + log, + MainnetEthSpec::default_spec(), + ); + let n = 10; + let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); + for deposit in &deposits { + deposit_contract + .deposit(deposit.clone()) + .await + .expect("should perform a deposit"); + // Mine an extra block between deposits to test for corner cases + eth1.ganache.evm_mine().await.expect("should mine block"); + } + + service + .update_deposit_cache(None) + .await + .expect("should perform update"); - let eth1 = GanacheEth1Instance::new() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + assert!( + service.deposit_cache_len() >= n, + "should have imported n deposits" + ); + + for block_num in 0..=get_block_number(&web3).await { + let expected_deposit_count = blocking_deposit_count(ð1, block_num).await; + let expected_deposit_root = blocking_deposit_root(ð1, block_num).await; + + let deposit_count = service + .deposits() + .read() + .cache + .get_deposit_count_from_cache(block_num); + let deposit_root = service + .deposits() + .read() + .cache + .get_deposit_root_from_cache(block_num); + assert_eq!( + expected_deposit_count, deposit_count, + "deposit count from cache should match queried" + ); + assert_eq!( + expected_deposit_root, deposit_root, + "deposit root from cache should match queried" + ); + } + } + .compat() + .await; + } +} + +mod persist { + use super::*; + #[tokio::test] + async fn test_persist_caches() { + async { + let log = null_logger(); - let now = get_block_number(&web3).await; - let service = Service::new( - Config { + let eth1 = GanacheEth1Instance::new() + .await + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); + + let now = get_block_number(&web3).await; + let config = Config { endpoint: eth1.endpoint(), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, @@ -660,129 +770,61 @@ mod fast { follow_distance: 0, block_cache_truncation: None, ..Config::default() - }, - log, - MainnetEthSpec::default_spec(), - ); - let n = 10; - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) + }; + let service = Service::new(config.clone(), log.clone(), MainnetEthSpec::default_spec()); + let n = 10; + let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); + for deposit in &deposits { + deposit_contract + .deposit(deposit.clone()) + .await + .expect("should perform a deposit"); + } + + service + .update_deposit_cache(None) .await - .expect("should perform a deposit"); - // Mine an extra block between deposits to test for corner cases - eth1.ganache.evm_mine().await.expect("should mine block"); - } + .expect("should perform update"); - service - .update_deposit_cache(None) - .await - .expect("should perform update"); - - assert!( - 
service.deposit_cache_len() >= n, - "should have imported n deposits" - ); - - for block_num in 0..=get_block_number(&web3).await { - let expected_deposit_count = blocking_deposit_count(ð1, block_num).await; - let expected_deposit_root = blocking_deposit_root(ð1, block_num).await; - - let deposit_count = service - .deposits() - .read() - .cache - .get_deposit_count_from_cache(block_num); - let deposit_root = service - .deposits() - .read() - .cache - .get_deposit_root_from_cache(block_num); + assert!( + service.deposit_cache_len() >= n, + "should have imported n deposits" + ); + + let deposit_count = service.deposit_cache_len(); + + service + .update_block_cache(None) + .await + .expect("should perform update"); + + assert!( + service.block_cache_len() >= n, + "should have imported n eth1 blocks" + ); + + let block_count = service.block_cache_len(); + + let eth1_bytes = service.as_bytes(); + + // Drop service and recover from bytes + drop(service); + + let recovered_service = + Service::from_bytes(ð1_bytes, config, log, MainnetEthSpec::default_spec()) + .unwrap(); assert_eq!( - expected_deposit_count, deposit_count, - "deposit count from cache should match queried" + recovered_service.block_cache_len(), + block_count, + "Should have equal cached blocks as before recovery" ); assert_eq!( - expected_deposit_root, deposit_root, - "deposit root from cache should match queried" + recovered_service.deposit_cache_len(), + deposit_count, + "Should have equal cached deposits as before recovery" ); } - } -} - -mod persist { - use super::*; - #[tokio::test] - async fn test_persist_caches() { - let log = null_logger(); - - let eth1 = GanacheEth1Instance::new() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); - - let now = get_block_number(&web3).await; - let config = Config { - endpoint: eth1.endpoint(), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: now, - lowest_cached_block_number: now, - follow_distance: 0, - block_cache_truncation: None, - ..Config::default() - }; - let service = Service::new(config.clone(), log.clone(), MainnetEthSpec::default_spec()); - let n = 10; - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - } - - service - .update_deposit_cache(None) - .await - .expect("should perform update"); - - assert!( - service.deposit_cache_len() >= n, - "should have imported n deposits" - ); - - let deposit_count = service.deposit_cache_len(); - - service - .update_block_cache(None) - .await - .expect("should perform update"); - - assert!( - service.block_cache_len() >= n, - "should have imported n eth1 blocks" - ); - - let block_count = service.block_cache_len(); - - let eth1_bytes = service.as_bytes(); - - // Drop service and recover from bytes - drop(service); - - let recovered_service = - Service::from_bytes(ð1_bytes, config, log, MainnetEthSpec::default_spec()).unwrap(); - assert_eq!( - recovered_service.block_cache_len(), - block_count, - "Should have equal cached blocks as before recovery" - ); - assert_eq!( - recovered_service.deposit_cache_len(), - deposit_count, - "Should have equal cached deposits as before recovery" - ); + .compat() + .await } } diff --git a/beacon_node/eth2_libp2p/Cargo.toml b/beacon_node/eth2_libp2p/Cargo.toml index c78fc879b45..2e1bbbd1a5a 100644 --- a/beacon_node/eth2_libp2p/Cargo.toml +++ 
b/beacon_node/eth2_libp2p/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Sigma Prime "] edition = "2018" [dependencies] -discv5 = { git = "https://github.com/sigp/discv5", rev = "fba7ceb5cfebd219ebbad6ffdb5d8c31dc8e4bc0", features = ["libp2p"] } +discv5 = { version = "0.1.0-beta.2", features = ["libp2p"] } +unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "dep-update", features = ["codec"] } types = { path = "../../consensus/types" } hashset_delay = { path = "../../common/hashset_delay" } eth2_ssz_types = { path = "../../consensus/ssz_types" } @@ -15,15 +16,15 @@ eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" slog = { version = "2.5.2", features = ["max_level_trace"] } lighthouse_version = { path = "../../common/lighthouse_version" } -tokio = { version = "0.2.22", features = ["time", "macros"] } -futures = "0.3.5" +tokio = { version = "0.3.2", features = ["time", "macros"] } +futures = "0.3.7" error-chain = "0.12.4" dirs = "3.0.1" fnv = "1.0.7" -unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "latest-codecs", features = ["codec"] } lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } smallvec = "1.4.2" +tokio-io-timeout = "0.5.0" lru = "0.6.0" parking_lot = "0.11.0" sha2 = "0.9.1" @@ -31,8 +32,7 @@ base64 = "0.13.0" snap = "1.0.1" void = "1.0.2" hex = "0.4.2" -tokio-io-timeout = "0.4.0" -tokio-util = { version = "0.3.1", features = ["codec", "compat"] } +tokio-util = { version = "0.4.0", features = ["codec", "compat"] } tiny-keccak = "2.0.2" task_executor = { path = "../../common/task_executor" } rand = "0.7.3" @@ -42,12 +42,12 @@ regex = "1.3.9" [dependencies.libp2p] #version = "0.23.0" git = "https://github.com/sigp/rust-libp2p" -rev = "f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c" +rev = "e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25" default-features = false features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp-tokio"] [dev-dependencies] -tokio = { version = "0.2.22", features = ["full"] } +tokio = { version = "0.3.2", features = ["full"] } slog-term = "2.6.0" slog-async = "2.5.0" tempdir = "0.3.7" diff --git a/beacon_node/eth2_libp2p/src/behaviour/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/mod.rs index fde3955c1f4..a313f5d43df 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/mod.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/mod.rs @@ -983,7 +983,14 @@ impl NetworkBehaviour for Behaviour { }; if let Some(goodbye_reason) = goodbye_reason { - debug!(self.log, "Disconnecting newly connected peer"; "peer_id" => peer_id.to_string(), "reason" => goodbye_reason.to_string()); + match goodbye_reason { + GoodbyeReason::Banned => { + debug!(self.log, "Disconnecting newly connected peer"; "peer_id" => peer_id.to_string(), "reason" => goodbye_reason.to_string()) + } + _ => { + trace!(self.log, "Disconnecting newly connected peer"; "peer_id" => peer_id.to_string(), "reason" => goodbye_reason.to_string()) + } + } self.peers_to_dc .push_back((peer_id.clone(), Some(goodbye_reason))); // NOTE: We don't inform the peer manager that this peer is disconnecting. It is simply @@ -1079,6 +1086,8 @@ impl NetworkBehaviour for Behaviour { // Inform the behaviour. 
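// A minimal sketch of the tokio 0.3 timer API that the hunks below migrate to:
// tokio 0.2's `delay_for`/`delay_until`/`Delay` become `sleep`/`sleep_until`/`Sleep`,
// and the timeout error moves to `tokio::time::error::Elapsed`. The function is
// illustrative only.
use std::time::Duration;
use tokio::time::{sleep, sleep_until, timeout, Instant};

async fn timer_examples() {
    // tokio 0.2: delay_for(...)
    sleep(Duration::from_millis(500)).await;

    // tokio 0.2: delay_until(...)
    sleep_until(Instant::now() + Duration::from_secs(1)).await;

    // tokio 0.2: tokio::time::Elapsed -> tokio 0.3: tokio::time::error::Elapsed
    let timed_out: Result<(), tokio::time::error::Elapsed> =
        timeout(Duration::from_millis(10), sleep(Duration::from_secs(1))).await;
    assert!(timed_out.is_err());
}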
delegate_to_behaviours!(self, inject_disconnected, peer_id); + debug!(self.log, "Peer disconnected"; "peer_id" => %peer_id); + // Decrement the PEERS_PER_CLIENT metric if let Some(kind) = self .network_globals diff --git a/beacon_node/eth2_libp2p/src/discovery/mod.rs b/beacon_node/eth2_libp2p/src/discovery/mod.rs index 5cbfe28b04e..21b5d8bb858 100644 --- a/beacon_node/eth2_libp2p/src/discovery/mod.rs +++ b/beacon_node/eth2_libp2p/src/discovery/mod.rs @@ -212,7 +212,10 @@ impl Discovery { // Start the discv5 service and obtain an event stream let event_stream = if !config.disable_discovery { - discv5.start(listen_socket).map_err(|e| e.to_string())?; + discv5 + .start(listen_socket) + .map_err(|e| e.to_string()) + .await?; debug!(log, "Discovery service started"); EventStream::Awaiting(Box::pin(discv5.event_stream())) } else { @@ -712,8 +715,10 @@ impl Discovery { return; } }; - // predicate for finding nodes with a matching fork - let eth2_fork_predicate = move |enr: &Enr| enr.eth2() == Ok(enr_fork_id.clone()); + // predicate for finding nodes with a matching fork and valid tcp port + let eth2_fork_predicate = move |enr: &Enr| { + enr.eth2() == Ok(enr_fork_id.clone()) && (enr.tcp().is_some() || enr.tcp6().is_some()) + }; // General predicate let predicate: Box bool + Send> = @@ -743,7 +748,7 @@ impl Discovery { } Ok(r) => { debug!(self.log, "Discovery query completed"; "peers_found" => r.len()); - let mut results: HashMap> = HashMap::new(); + let mut results: HashMap<_, Option> = HashMap::new(); r.iter().for_each(|enr| { // cache the found ENR's self.cached_enrs.put(enr.peer_id(), enr.clone()); @@ -766,7 +771,7 @@ impl Discovery { Ok(r) => { debug!(self.log, "Peer grouped subnet discovery request completed"; "peers_found" => r.len(), "subnets_searched_for" => format!("{:?}",subnets_searched_for)); - let mut mapped_results: HashMap> = HashMap::new(); + let mut mapped_results = HashMap::new(); // cache the found ENR's for enr in r.iter().cloned() { diff --git a/beacon_node/eth2_libp2p/src/lib.rs b/beacon_node/eth2_libp2p/src/lib.rs index 6d5526d97dd..6978e465efd 100644 --- a/beacon_node/eth2_libp2p/src/lib.rs +++ b/beacon_node/eth2_libp2p/src/lib.rs @@ -7,6 +7,8 @@ extern crate lazy_static; pub mod behaviour; mod config; + +#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod discovery; mod metrics; mod peer_manager; @@ -64,6 +66,7 @@ pub use config::Config as NetworkConfig; pub use config::{GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage}; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discv5; +pub use libp2p::bandwidth::BandwidthSinks; pub use libp2p::gossipsub::{MessageAcceptance, MessageId, Topic, TopicHash}; pub use libp2p::{core::ConnectedPoint, PeerId, Swarm}; pub use libp2p::{multiaddr, Multiaddr}; diff --git a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs index fc95b6bd9f4..12038a72352 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs @@ -27,6 +27,7 @@ pub use libp2p::core::{identity::Keypair, Multiaddr}; pub mod client; mod peer_info; mod peer_sync_status; +#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy mod peerdb; pub(crate) mod score; @@ -639,6 +640,7 @@ impl PeerManager { /// with a new `PeerId` which involves a discovery routing table lookup. We could dial the /// multiaddr here, however this could relate to duplicate PeerId's etc. 
If the lookup /// proves resource constraining, we should switch to multiaddr dialling here. + #[allow(clippy::mutable_key_type)] fn peers_discovered(&mut self, results: HashMap>) { let mut to_dial_peers = Vec::new(); diff --git a/beacon_node/eth2_libp2p/src/rpc/handler.rs b/beacon_node/eth2_libp2p/src/rpc/handler.rs index 7713aaf52d8..727653c3d13 100644 --- a/beacon_node/eth2_libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2_libp2p/src/rpc/handler.rs @@ -22,7 +22,8 @@ use std::{ task::{Context, Poll}, time::Duration, }; -use tokio::time::{delay_queue, delay_until, Delay, DelayQueue, Instant as TInstant}; +use tokio::time::{sleep_until, Instant as TInstant, Sleep}; +use tokio_util::time::{delay_queue, DelayQueue}; use types::EthSpec; /// The time (in seconds) before a substream that is awaiting a response from the user times out. @@ -132,7 +133,7 @@ enum HandlerState { /// /// While in this state the handler rejects new requests but tries to finish existing ones. /// Once the timer expires, all messages are killed. - ShuttingDown(Delay), + ShuttingDown(Sleep), /// The handler is deactivated. A goodbye has been sent and no more messages are sent or /// received. Deactivated, @@ -255,7 +256,7 @@ where self.dial_queue.push((id, req)); } - self.state = HandlerState::ShuttingDown(delay_until( + self.state = HandlerState::ShuttingDown(sleep_until( TInstant::now() + Duration::from_secs(SHUTDOWN_TIMEOUT_SECS as u64), )); } @@ -540,7 +541,7 @@ where // purge expired inbound substreams and send an error loop { - match self.inbound_substreams_delay.poll_next_unpin(cx) { + match self.inbound_substreams_delay.poll_expired(cx) { Poll::Ready(Some(Ok(inbound_id))) => { // handle a stream timeout for various states if let Some(info) = self.inbound_substreams.get_mut(inbound_id.get_ref()) { @@ -574,7 +575,7 @@ where // purge expired outbound substreams loop { - match self.outbound_substreams_delay.poll_next_unpin(cx) { + match self.outbound_substreams_delay.poll_expired(cx) { Poll::Ready(Some(Ok(outbound_id))) => { if let Some(OutboundInfo { proto, req_id, .. }) = self.outbound_substreams.remove(outbound_id.get_ref()) @@ -672,6 +673,7 @@ where if let Some(ref delay_key) = info.delay_key { self.inbound_substreams_delay.remove(delay_key); } + break; } else { // If we are not removing this substream, we reset the timer. // Each chunk is allowed RESPONSE_TIMEOUT to be sent. diff --git a/beacon_node/eth2_libp2p/src/rpc/protocol.rs b/beacon_node/eth2_libp2p/src/rpc/protocol.rs index ba6e5c18751..329d76ceb3a 100644 --- a/beacon_node/eth2_libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2_libp2p/src/rpc/protocol.rs @@ -503,8 +503,8 @@ impl From for RPCError { RPCError::SSZDecodeError(err) } } -impl From for RPCError { - fn from(_: tokio::time::Elapsed) -> Self { +impl From for RPCError { + fn from(_: tokio::time::error::Elapsed) -> Self { RPCError::StreamTimeout } } diff --git a/beacon_node/eth2_libp2p/src/service.rs b/beacon_node/eth2_libp2p/src/service.rs index 44e1e9319c2..ff2947572ae 100644 --- a/beacon_node/eth2_libp2p/src/service.rs +++ b/beacon_node/eth2_libp2p/src/service.rs @@ -12,6 +12,7 @@ use libp2p::core::{ identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed, }; use libp2p::{ + bandwidth::{BandwidthLogging, BandwidthSinks}, core, noise, swarm::{SwarmBuilder, SwarmEvent}, PeerId, Swarm, Transport, @@ -48,10 +49,10 @@ pub enum Libp2pEvent { pub struct Service { /// The libp2p Swarm handler. pub swarm: Swarm>, - + /// The bandwidth logger for the underlying libp2p transport. 
+ pub bandwidth: Arc, /// This node's PeerId. pub local_peer_id: PeerId, - /// The libp2p logger handle. pub log: Logger, } @@ -100,10 +101,11 @@ impl Service { }; debug!(log, "Attempting to open listening ports"; "address" => format!("{}", config.listen_address), "tcp_port" => config.libp2p_port, "udp_port" => discovery_string); - let mut swarm = { + let (mut swarm, bandwidth) = { // Set up the transport - tcp/ws with noise and mplex - let transport = build_transport(local_keypair.clone()) + let (transport, bandwidth) = build_transport(local_keypair.clone()) .map_err(|e| format!("Failed to build transport: {:?}", e))?; + // Lighthouse network behaviour let behaviour = Behaviour::new( &local_keypair, @@ -121,14 +123,17 @@ impl Service { self.0.spawn(f, "libp2p"); } } - SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) - .notify_handler_buffer_size(std::num::NonZeroUsize::new(32).expect("Not zero")) - .connection_event_buffer_size(64) - .incoming_connection_limit(10) - .outgoing_connection_limit(config.target_peers * 2) - .peer_connection_limit(MAX_CONNECTIONS_PER_PEER) - .executor(Box::new(Executor(executor))) - .build() + ( + SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) + .notify_handler_buffer_size(std::num::NonZeroUsize::new(32).expect("Not zero")) + .connection_event_buffer_size(64) + .incoming_connection_limit(10) + .outgoing_connection_limit(config.target_peers * 2) + .peer_connection_limit(MAX_CONNECTIONS_PER_PEER) + .executor(Box::new(Executor(executor))) + .build(), + bandwidth, + ) }; // listen on the specified address @@ -221,6 +226,7 @@ impl Service { let service = Service { local_peer_id, + bandwidth, swarm, log, }; @@ -273,7 +279,7 @@ impl Service { endpoint: _, num_established, } => { - debug!(self.log, "Connection closed"; "peer_id"=> peer_id.to_string(), "cause" => format!("{:?}", cause), "connections" => num_established); + trace!(self.log, "Connection closed"; "peer_id"=> peer_id.to_string(), "cause" => format!("{:?}", cause), "connections" => num_established); } SwarmEvent::NewListenAddr(multiaddr) => { return Libp2pEvent::NewListenAddr(multiaddr) @@ -282,7 +288,7 @@ impl Service { local_addr, send_back_addr, } => { - debug!(self.log, "Incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string()) + trace!(self.log, "Incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string()) } SwarmEvent::IncomingConnectionError { local_addr, @@ -329,9 +335,13 @@ impl Service { } } +type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; + /// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and /// mplex as the multiplexing layer. 
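// A hedged sketch of how the new `bandwidth` field can be produced and read. It
// assumes, based on the imports in this hunk, that `BandwidthLogging::new` wraps a
// transport and returns it together with an `Arc<BandwidthSinks>`, and that the
// sinks expose cumulative `total_inbound`/`total_outbound` byte counters; the
// transport construction is abbreviated.
use libp2p::bandwidth::{BandwidthLogging, BandwidthSinks};
use libp2p::core::transport::MemoryTransport;
use std::sync::Arc;

fn wrapped_transport() -> Arc<BandwidthSinks> {
    // Any transport can be wrapped; a memory transport keeps the sketch
    // self-contained. The real code wraps the TCP/websocket transport before
    // upgrading it with noise + mplex and boxing it.
    let (transport, bandwidth) = BandwidthLogging::new(MemoryTransport::default());
    let _ = transport;
    bandwidth
}

fn report(bandwidth: &BandwidthSinks) {
    // Cumulative byte counters, suitable for exposing as metrics.
    println!(
        "inbound: {} B, outbound: {} B",
        bandwidth.total_inbound(),
        bandwidth.total_outbound()
    );
}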
-fn build_transport(local_private_key: Keypair) -> std::io::Result> { +fn build_transport( + local_private_key: Keypair, +) -> std::io::Result<(BoxedTransport, Arc)> { let transport = libp2p::tcp::TokioTcpConfig::new().nodelay(true); let transport = libp2p::dns::DnsConfig::new(transport)?; #[cfg(feature = "libp2p-websocket")] @@ -340,21 +350,26 @@ fn build_transport(local_private_key: Keypair) -> std::io::Result) -> NetworkConfig { config } -pub async fn build_libp2p_instance(boot_nodes: Vec, log: slog::Logger) -> Libp2pInstance { +pub async fn build_libp2p_instance( + rt: Weak, + boot_nodes: Vec, + log: slog::Logger, +) -> Libp2pInstance { let port = unused_port("tcp").unwrap(); let config = build_config(port, boot_nodes); // launch libp2p service let (signal, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let executor = task_executor::TaskExecutor::new( - tokio::runtime::Handle::current(), - exit, - log.clone(), - shutdown_tx, - ); + let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx); Libp2pInstance( LibP2PService::new( executor, @@ -127,10 +128,14 @@ pub fn get_enr(node: &LibP2PService) -> Enr { // Returns `n` libp2p peers in fully connected topology. #[allow(dead_code)] -pub async fn build_full_mesh(log: slog::Logger, n: usize) -> Vec { +pub async fn build_full_mesh( + rt: Weak, + log: slog::Logger, + n: usize, +) -> Vec { let mut nodes = Vec::with_capacity(n); for _ in 0..n { - nodes.push(build_libp2p_instance(vec![], log.clone()).await); + nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone()).await); } let multiaddrs: Vec = nodes .iter() @@ -153,12 +158,15 @@ pub async fn build_full_mesh(log: slog::Logger, n: usize) -> Vec // Constructs a pair of nodes with separate loggers. The sender dials the receiver. // This returns a (sender, receiver) pair. #[allow(dead_code)] -pub async fn build_node_pair(log: &slog::Logger) -> (Libp2pInstance, Libp2pInstance) { +pub async fn build_node_pair( + rt: Weak, + log: &slog::Logger, +) -> (Libp2pInstance, Libp2pInstance) { let sender_log = log.new(o!("who" => "sender")); let receiver_log = log.new(o!("who" => "receiver")); - let mut sender = build_libp2p_instance(vec![], sender_log).await; - let mut receiver = build_libp2p_instance(vec![], receiver_log).await; + let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log).await; + let mut receiver = build_libp2p_instance(rt, vec![], receiver_log).await; let receiver_multiaddr = receiver.swarm.local_enr().multiaddr()[1].clone(); @@ -182,7 +190,7 @@ pub async fn build_node_pair(log: &slog::Logger) -> (Libp2pInstance, Libp2pInsta // wait for either both nodes to listen or a timeout tokio::select! 
{ - _ = tokio::time::delay_for(Duration::from_millis(500)) => {} + _ = tokio::time::sleep(Duration::from_millis(500)) => {} _ = joined => {} } @@ -197,10 +205,10 @@ pub async fn build_node_pair(log: &slog::Logger) -> (Libp2pInstance, Libp2pInsta // Returns `n` peers in a linear topology #[allow(dead_code)] -pub async fn build_linear(log: slog::Logger, n: usize) -> Vec { +pub async fn build_linear(rt: Weak, log: slog::Logger, n: usize) -> Vec { let mut nodes = Vec::with_capacity(n); for _ in 0..n { - nodes.push(build_libp2p_instance(vec![], log.clone()).await); + nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone()).await); } let multiaddrs: Vec = nodes diff --git a/beacon_node/eth2_libp2p/tests/rpc_tests.rs b/beacon_node/eth2_libp2p/tests/rpc_tests.rs index cef3e1b2bd7..5a5e0b28f6b 100644 --- a/beacon_node/eth2_libp2p/tests/rpc_tests.rs +++ b/beacon_node/eth2_libp2p/tests/rpc_tests.rs @@ -3,8 +3,10 @@ use eth2_libp2p::rpc::methods::*; use eth2_libp2p::{BehaviourEvent, Libp2pEvent, Request, Response}; use slog::{debug, warn, Level}; use ssz_types::VariableList; +use std::sync::Arc; use std::time::Duration; -use tokio::time::delay_for; +use tokio::runtime::Runtime; +use tokio::time::sleep; use types::{ BeaconBlock, Epoch, EthSpec, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, }; @@ -13,97 +15,105 @@ mod common; type E = MinimalEthSpec; -#[tokio::test] // Tests the STATUS RPC message -async fn test_status_rpc() { +#[test] +fn test_status_rpc() { // set up the logging. The level and enabled logging or not let log_level = Level::Debug; let enable_logging = false; + let rt = Arc::new(Runtime::new().unwrap()); + let log = common::build_log(log_level, enable_logging); - // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log).await; - - // Dummy STATUS RPC message - let rpc_request = Request::Status(StatusMessage { - fork_digest: [0; 4], - finalized_root: Hash256::from_low_u64_be(0), - finalized_epoch: Epoch::new(1), - head_root: Hash256::from_low_u64_be(0), - head_slot: Slot::new(1), - }); - - // Dummy STATUS RPC message - let rpc_response = Response::Status(StatusMessage { - fork_digest: [0; 4], - finalized_root: Hash256::from_low_u64_be(0), - finalized_epoch: Epoch::new(1), - head_root: Hash256::from_low_u64_be(0), - head_slot: Slot::new(1), - }); - - // build the sender future - let sender_future = async { - loop { - match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { - // Send a STATUS message - debug!(log, "Sending RPC"); - sender - .swarm - .send_request(peer_id, RequestId::Sync(10), rpc_request.clone()); - } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { - peer_id: _, - id: RequestId::Sync(10), - response, - }) => { - // Should receive the RPC response - debug!(log, "Sender Received"); - assert_eq!(response, rpc_response.clone()); - debug!(log, "Sender Completed"); - return; + rt.block_on(async { + // get sender/receiver + let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + + // Dummy STATUS RPC message + let rpc_request = Request::Status(StatusMessage { + fork_digest: [0; 4], + finalized_root: Hash256::from_low_u64_be(0), + finalized_epoch: Epoch::new(1), + head_root: Hash256::from_low_u64_be(0), + head_slot: Slot::new(1), + }); + + // Dummy STATUS RPC message + let rpc_response = Response::Status(StatusMessage { + fork_digest: [0; 4], + finalized_root: Hash256::from_low_u64_be(0), + finalized_epoch: Epoch::new(1), + head_root: 
Hash256::from_low_u64_be(0), + head_slot: Slot::new(1), + }); + + // build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + // Send a STATUS message + debug!(log, "Sending RPC"); + sender.swarm.send_request( + peer_id, + RequestId::Sync(10), + rpc_request.clone(), + ); + } + Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + peer_id: _, + id: RequestId::Sync(10), + response, + }) => { + // Should receive the RPC response + debug!(log, "Sender Received"); + assert_eq!(response, rpc_response.clone()); + debug!(log, "Sender Completed"); + return; + } + _ => {} } - _ => {} } - } - }; - - // build the receiver future - let receiver_future = async { - loop { - match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { - peer_id, - id, - request, - }) => { - if request == rpc_request { - // send the response - debug!(log, "Receiver Received"); - receiver - .swarm - .send_successful_response(peer_id, id, rpc_response.clone()); + }; + + // build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + peer_id, + id, + request, + }) => { + if request == rpc_request { + // send the response + debug!(log, "Receiver Received"); + receiver.swarm.send_successful_response( + peer_id, + id, + rpc_response.clone(), + ); + } } + _ => {} // Ignore other events } - _ => {} // Ignore other events } - } - }; + }; - tokio::select! { - _ = sender_future => {} - _ = receiver_future => {} - _ = delay_for(Duration::from_secs(30)) => { - panic!("Future timed out"); + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } } - } + }) } -#[tokio::test] // Tests a streamed BlocksByRange RPC Message -async fn test_blocks_by_range_chunked_rpc() { +#[test] +fn test_blocks_by_range_chunked_rpc() { // set up the logging. 
The level and enabled logging or not let log_level = Level::Trace; let enable_logging = false; @@ -112,108 +122,114 @@ async fn test_blocks_by_range_chunked_rpc() { let log = common::build_log(log_level, enable_logging); - // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log).await; - - // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { - start_slot: 0, - count: messages_to_send, - step: 0, - }); - - // BlocksByRange Response - let spec = E::default_spec(); - let empty_block = BeaconBlock::empty(&spec); - let empty_signed = SignedBeaconBlock { - message: empty_block, - signature: Signature::empty(), - }; - let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); - - // keep count of the number of messages received - let mut messages_received = 0; - // build the sender future - let sender_future = async { - loop { - match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { - // Send a STATUS message - debug!(log, "Sending RPC"); - sender - .swarm - .send_request(peer_id, RequestId::Sync(10), rpc_request.clone()); - } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { - peer_id: _, - id: RequestId::Sync(10), - response, - }) => { - warn!(log, "Sender received a response"); - match response { - Response::BlocksByRange(Some(_)) => { - assert_eq!(response, rpc_response.clone()); - messages_received += 1; - warn!(log, "Chunk received"); - } - Response::BlocksByRange(None) => { - // should be exactly 10 messages before terminating - assert_eq!(messages_received, messages_to_send); - // end the test - return; + let rt = Arc::new(Runtime::new().unwrap()); + + rt.block_on(async { + // get sender/receiver + let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + + // BlocksByRange Request + let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { + start_slot: 0, + count: messages_to_send, + step: 0, + }); + + // BlocksByRange Response + let spec = E::default_spec(); + let empty_block = BeaconBlock::empty(&spec); + let empty_signed = SignedBeaconBlock { + message: empty_block, + signature: Signature::empty(), + }; + let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); + + // keep count of the number of messages received + let mut messages_received = 0; + // build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + // Send a STATUS message + debug!(log, "Sending RPC"); + sender.swarm.send_request( + peer_id, + RequestId::Sync(10), + rpc_request.clone(), + ); + } + Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + peer_id: _, + id: RequestId::Sync(10), + response, + }) => { + warn!(log, "Sender received a response"); + match response { + Response::BlocksByRange(Some(_)) => { + assert_eq!(response, rpc_response.clone()); + messages_received += 1; + warn!(log, "Chunk received"); + } + Response::BlocksByRange(None) => { + // should be exactly 10 messages before terminating + assert_eq!(messages_received, messages_to_send); + // end the test + return; + } + _ => panic!("Invalid RPC received"), } - _ => panic!("Invalid RPC received"), } + _ => {} // Ignore other behaviour events } - _ => {} // Ignore other behaviour events } - } - }; - - // build the receiver future - let receiver_future = async { - loop { - match receiver.next_event().await { - 
Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { - peer_id, - id, - request, - }) => { - if request == rpc_request { - // send the response - warn!(log, "Receiver got request"); - for _ in 1..=messages_to_send { + }; + + // build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + peer_id, + id, + request, + }) => { + if request == rpc_request { + // send the response + warn!(log, "Receiver got request"); + for _ in 1..=messages_to_send { + receiver.swarm.send_successful_response( + peer_id.clone(), + id, + rpc_response.clone(), + ); + } + // send the stream termination receiver.swarm.send_successful_response( - peer_id.clone(), + peer_id, id, - rpc_response.clone(), + Response::BlocksByRange(None), ); } - // send the stream termination - receiver.swarm.send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); } + _ => {} // Ignore other events } - _ => {} // Ignore other events } - } - }; + }; - tokio::select! { - _ = sender_future => {} - _ = receiver_future => {} - _ = delay_for(Duration::from_secs(30)) => { - panic!("Future timed out"); + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(10)) => { + panic!("Future timed out"); + } } - } + }) } -#[tokio::test] // Tests that a streamed BlocksByRange RPC Message terminates when all expected chunks were received -async fn test_blocks_by_range_chunked_rpc_terminates_correctly() { +#[test] +fn test_blocks_by_range_chunked_rpc_terminates_correctly() { // set up the logging. The level and enabled logging or not let log_level = Level::Debug; let enable_logging = false; @@ -223,237 +239,248 @@ async fn test_blocks_by_range_chunked_rpc_terminates_correctly() { let log = common::build_log(log_level, enable_logging); - // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log).await; - - // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { - start_slot: 0, - count: messages_to_send, - step: 0, - }); + let rt = Arc::new(Runtime::new().unwrap()); + + rt.block_on(async { + // get sender/receiver + let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + + // BlocksByRange Request + let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { + start_slot: 0, + count: messages_to_send, + step: 0, + }); + + // BlocksByRange Response + let spec = E::default_spec(); + let empty_block = BeaconBlock::empty(&spec); + let empty_signed = SignedBeaconBlock { + message: empty_block, + signature: Signature::empty(), + }; + let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); + + // keep count of the number of messages received + let mut messages_received: u64 = 0; + // build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + // Send a STATUS message + debug!(log, "Sending RPC"); + sender.swarm.send_request( + peer_id, + RequestId::Sync(10), + rpc_request.clone(), + ); + } + Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + peer_id: _, + id: RequestId::Sync(10), + response, + }) => + // Should receive the RPC response + { + debug!(log, "Sender received a response"); + match response { + Response::BlocksByRange(Some(_)) => { + assert_eq!(response, rpc_response.clone()); + messages_received += 1; + } + Response::BlocksByRange(None) => 
{ + // should be exactly 10 messages, as requested + assert_eq!(messages_received, messages_to_send); + } + _ => panic!("Invalid RPC received"), + } + } - // BlocksByRange Response - let spec = E::default_spec(); - let empty_block = BeaconBlock::empty(&spec); - let empty_signed = SignedBeaconBlock { - message: empty_block, - signature: Signature::empty(), - }; - let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); - - // keep count of the number of messages received - let mut messages_received: u64 = 0; - // build the sender future - let sender_future = async { - loop { - match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { - // Send a STATUS message - debug!(log, "Sending RPC"); - sender - .swarm - .send_request(peer_id, RequestId::Sync(10), rpc_request.clone()); + _ => {} // Ignore other behaviour events } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { - peer_id: _, - id: RequestId::Sync(10), - response, - }) => - // Should receive the RPC response + } + }; + + // determine messages to send (PeerId, RequestId). If some, indicates we still need to send + // messages + let mut message_info = None; + // the number of messages we've sent + let mut messages_sent = 0; + let receiver_future = async { + loop { + // this future either drives the sending/receiving or times out allowing messages to be + // sent in the timeout + match futures::future::select( + Box::pin(receiver.next_event()), + tokio::time::sleep(Duration::from_secs(1)), + ) + .await { - debug!(log, "Sender received a response"); - match response { - Response::BlocksByRange(Some(_)) => { - assert_eq!(response, rpc_response.clone()); - messages_received += 1; - } - Response::BlocksByRange(None) => { - // should be exactly 10 messages, as requested - assert_eq!(messages_received, messages_to_send); + futures::future::Either::Left(( + Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + peer_id, + id, + request, + }), + _, + )) => { + if request == rpc_request { + // send the response + warn!(log, "Receiver got request"); + message_info = Some((peer_id, id)); } - _ => panic!("Invalid RPC received"), } + futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required + _ => continue, } - _ => {} // Ignore other behaviour events - } - } - }; - - // determine messages to send (PeerId, RequestId). If some, indicates we still need to send - // messages - let mut message_info = None; - // the number of messages we've sent - let mut messages_sent = 0; - let receiver_future = async { - loop { - // this future either drives the sending/receiving or times out allowing messages to be - // sent in the timeout - match futures::future::select( - Box::pin(receiver.next_event()), - tokio::time::delay_for(Duration::from_secs(1)), - ) - .await - { - futures::future::Either::Left(( - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { - peer_id, - id, - request, - }), - _, - )) => { - if request == rpc_request { - // send the response - warn!(log, "Receiver got request"); - message_info = Some((peer_id, id)); + // if we need to send messages send them here. 
This will happen after a delay + if message_info.is_some() { + messages_sent += 1; + let (peer_id, stream_id) = message_info.as_ref().unwrap(); + receiver.swarm.send_successful_response( + peer_id.clone(), + stream_id.clone(), + rpc_response.clone(), + ); + debug!(log, "Sending message {}", messages_sent); + if messages_sent == messages_to_send + extra_messages_to_send { + // stop sending messages + return; } } - futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required - _ => continue, } + }; - // if we need to send messages send them here. This will happen after a delay - if message_info.is_some() { - messages_sent += 1; - let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.swarm.send_successful_response( - peer_id.clone(), - stream_id.clone(), - rpc_response.clone(), - ); - debug!(log, "Sending message {}", messages_sent); - if messages_sent == messages_to_send + extra_messages_to_send { - // stop sending messages - return; - } + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); } } - }; - - tokio::select! { - _ = sender_future => {} - _ = receiver_future => {} - _ = delay_for(Duration::from_secs(30)) => { - panic!("Future timed out"); - } - } + }) } -#[tokio::test] // Tests an empty response to a BlocksByRange RPC Message -async fn test_blocks_by_range_single_empty_rpc() { +#[test] +fn test_blocks_by_range_single_empty_rpc() { // set up the logging. The level and enabled logging or not let log_level = Level::Trace; let enable_logging = false; let log = common::build_log(log_level, enable_logging); - - // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log).await; - - // BlocksByRange Request - let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { - start_slot: 0, - count: 10, - step: 0, - }); - - // BlocksByRange Response - let spec = E::default_spec(); - let empty_block = BeaconBlock::empty(&spec); - let empty_signed = SignedBeaconBlock { - message: empty_block, - signature: Signature::empty(), - }; - let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); - - let messages_to_send = 1; - - // keep count of the number of messages received - let mut messages_received = 0; - // build the sender future - let sender_future = async { - loop { - match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { - // Send a STATUS message - debug!(log, "Sending RPC"); - sender - .swarm - .send_request(peer_id, RequestId::Sync(10), rpc_request.clone()); - } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { - peer_id: _, - id: RequestId::Sync(10), - response, - }) => match response { - Response::BlocksByRange(Some(_)) => { - assert_eq!(response, rpc_response.clone()); - messages_received += 1; - warn!(log, "Chunk received"); - } - Response::BlocksByRange(None) => { - // should be exactly 10 messages before terminating - assert_eq!(messages_received, messages_to_send); - // end the test - return; + let rt = Arc::new(Runtime::new().unwrap()); + + rt.block_on(async { + // get sender/receiver + let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + + // BlocksByRange Request + let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { + start_slot: 0, + count: 10, + step: 0, + }); + + // BlocksByRange Response + let spec = E::default_spec(); + let empty_block = BeaconBlock::empty(&spec); + let empty_signed = 
SignedBeaconBlock { + message: empty_block, + signature: Signature::empty(), + }; + let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); + + let messages_to_send = 1; + + // keep count of the number of messages received + let mut messages_received = 0; + // build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + // Send a STATUS message + debug!(log, "Sending RPC"); + sender.swarm.send_request( + peer_id, + RequestId::Sync(10), + rpc_request.clone(), + ); } - _ => panic!("Invalid RPC received"), - }, - _ => {} // Ignore other behaviour events + Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + peer_id: _, + id: RequestId::Sync(10), + response, + }) => match response { + Response::BlocksByRange(Some(_)) => { + assert_eq!(response, rpc_response.clone()); + messages_received += 1; + warn!(log, "Chunk received"); + } + Response::BlocksByRange(None) => { + // should be exactly 10 messages before terminating + assert_eq!(messages_received, messages_to_send); + // end the test + return; + } + _ => panic!("Invalid RPC received"), + }, + _ => {} // Ignore other behaviour events + } } - } - }; - - // build the receiver future - let receiver_future = async { - loop { - match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { - peer_id, - id, - request, - }) => { - if request == rpc_request { - // send the response - warn!(log, "Receiver got request"); - - for _ in 1..=messages_to_send { + }; + + // build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + peer_id, + id, + request, + }) => { + if request == rpc_request { + // send the response + warn!(log, "Receiver got request"); + + for _ in 1..=messages_to_send { + receiver.swarm.send_successful_response( + peer_id.clone(), + id, + rpc_response.clone(), + ); + } + // send the stream termination receiver.swarm.send_successful_response( - peer_id.clone(), + peer_id, id, - rpc_response.clone(), + Response::BlocksByRange(None), ); } - // send the stream termination - receiver.swarm.send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); } + _ => {} // Ignore other events } - _ => {} // Ignore other events + } + }; + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(20)) => { + panic!("Future timed out"); } } - }; - tokio::select! { - _ = sender_future => {} - _ = receiver_future => {} - _ = delay_for(Duration::from_secs(20)) => { - panic!("Future timed out"); - } - } + }) } -#[tokio::test] // Tests a streamed, chunked BlocksByRoot RPC Message // The size of the reponse is a full `BeaconBlock` // which is greater than the Snappy frame size. Hence, this test // serves to test the snappy framing format as well. -async fn test_blocks_by_root_chunked_rpc() { +#[test] +fn test_blocks_by_root_chunked_rpc() { // set up the logging. 
The level and enabled logging or not let log_level = Level::Debug; let enable_logging = false; @@ -463,108 +490,113 @@ async fn test_blocks_by_root_chunked_rpc() { let log = common::build_log(log_level, enable_logging); let spec = E::default_spec(); + let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log).await; - - // BlocksByRoot Request - let rpc_request = Request::BlocksByRoot(BlocksByRootRequest { - block_roots: VariableList::from(vec![ - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - ]), - }); - - // BlocksByRoot Response - let full_block = BeaconBlock::full(&spec); - let signed_full_block = SignedBeaconBlock { - message: full_block, - signature: Signature::empty(), - }; - let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); - - // keep count of the number of messages received - let mut messages_received = 0; - // build the sender future - let sender_future = async { - loop { - match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { - // Send a STATUS message - debug!(log, "Sending RPC"); - sender - .swarm - .send_request(peer_id, RequestId::Sync(10), rpc_request.clone()); - } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { - peer_id: _, - id: RequestId::Sync(10), - response, - }) => match response { - Response::BlocksByRoot(Some(_)) => { - assert_eq!(response, rpc_response.clone()); - messages_received += 1; - debug!(log, "Chunk received"); - } - Response::BlocksByRoot(None) => { - // should be exactly messages_to_send - assert_eq!(messages_received, messages_to_send); - // end the test - return; + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + + // BlocksByRoot Request + let rpc_request = Request::BlocksByRoot(BlocksByRootRequest { + block_roots: VariableList::from(vec![ + Hash256::from_low_u64_be(0), + Hash256::from_low_u64_be(0), + Hash256::from_low_u64_be(0), + ]), + }); + + // BlocksByRoot Response + let full_block = BeaconBlock::full(&spec); + let signed_full_block = SignedBeaconBlock { + message: full_block, + signature: Signature::empty(), + }; + let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + + // keep count of the number of messages received + let mut messages_received = 0; + // build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + // Send a STATUS message + debug!(log, "Sending RPC"); + sender.swarm.send_request( + peer_id, + RequestId::Sync(10), + rpc_request.clone(), + ); } - _ => {} // Ignore other RPC messages - }, - _ => {} // Ignore other behaviour events + Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + peer_id: _, + id: RequestId::Sync(10), + response, + }) => match response { + Response::BlocksByRoot(Some(_)) => { + assert_eq!(response, rpc_response.clone()); + messages_received += 1; + debug!(log, "Chunk received"); + } + Response::BlocksByRoot(None) => { + // should be exactly messages_to_send + assert_eq!(messages_received, messages_to_send); + // end the test + return; + } + _ => {} // Ignore other RPC messages + }, + _ => {} // Ignore other behaviour events + } } - } - }; - - // build the receiver future - let receiver_future = async { - loop { - match receiver.next_event().await { - 
Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { - peer_id, - id, - request, - }) => { - if request == rpc_request { - // send the response - debug!(log, "Receiver got request"); - - for _ in 1..=messages_to_send { + }; + + // build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + peer_id, + id, + request, + }) => { + if request == rpc_request { + // send the response + debug!(log, "Receiver got request"); + + for _ in 1..=messages_to_send { + receiver.swarm.send_successful_response( + peer_id.clone(), + id, + rpc_response.clone(), + ); + debug!(log, "Sending message"); + } + // send the stream termination receiver.swarm.send_successful_response( - peer_id.clone(), + peer_id, id, - rpc_response.clone(), + Response::BlocksByRange(None), ); - debug!(log, "Sending message"); + debug!(log, "Send stream term"); } - // send the stream termination - receiver.swarm.send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); - debug!(log, "Send stream term"); } + _ => {} // Ignore other events } - _ => {} // Ignore other events + } + }; + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); } } - }; - tokio::select! { - _ = sender_future => {} - _ = receiver_future => {} - _ = delay_for(Duration::from_secs(30)) => { - panic!("Future timed out"); - } - } + }) } -#[tokio::test] // Tests a streamed, chunked BlocksByRoot RPC Message terminates when all expected reponses have been received -async fn test_blocks_by_root_chunked_rpc_terminates_correctly() { +#[test] +fn test_blocks_by_root_chunked_rpc_terminates_correctly() { // set up the logging. 
The level and enabled logging or not let log_level = Level::Debug; let enable_logging = false; @@ -575,182 +607,190 @@ async fn test_blocks_by_root_chunked_rpc_terminates_correctly() { let log = common::build_log(log_level, enable_logging); let spec = E::default_spec(); + let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log).await; - - // BlocksByRoot Request - let rpc_request = Request::BlocksByRoot(BlocksByRootRequest { - block_roots: VariableList::from(vec![ - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - ]), - }); - - // BlocksByRoot Response - let full_block = BeaconBlock::full(&spec); - let signed_full_block = SignedBeaconBlock { - message: full_block, - signature: Signature::empty(), - }; - let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); - - // keep count of the number of messages received - let mut messages_received = 0; - // build the sender future - let sender_future = async { - loop { - match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { - // Send a STATUS message - debug!(log, "Sending RPC"); - sender - .swarm - .send_request(peer_id, RequestId::Sync(10), rpc_request.clone()); - } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { - peer_id: _, - id: RequestId::Sync(10), - response, - }) => { - debug!(log, "Sender received a response"); - match response { - Response::BlocksByRoot(Some(_)) => { - assert_eq!(response, rpc_response.clone()); - messages_received += 1; - debug!(log, "Chunk received"); - } - Response::BlocksByRoot(None) => { - // should be exactly messages_to_send - assert_eq!(messages_received, messages_to_send); - // end the test - return; + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + + // BlocksByRoot Request + let rpc_request = Request::BlocksByRoot(BlocksByRootRequest { + block_roots: VariableList::from(vec![ + Hash256::from_low_u64_be(0), + Hash256::from_low_u64_be(0), + Hash256::from_low_u64_be(0), + Hash256::from_low_u64_be(0), + Hash256::from_low_u64_be(0), + Hash256::from_low_u64_be(0), + Hash256::from_low_u64_be(0), + Hash256::from_low_u64_be(0), + Hash256::from_low_u64_be(0), + Hash256::from_low_u64_be(0), + ]), + }); + + // BlocksByRoot Response + let full_block = BeaconBlock::full(&spec); + let signed_full_block = SignedBeaconBlock { + message: full_block, + signature: Signature::empty(), + }; + let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + + // keep count of the number of messages received + let mut messages_received = 0; + // build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + // Send a STATUS message + debug!(log, "Sending RPC"); + sender.swarm.send_request( + peer_id, + RequestId::Sync(10), + rpc_request.clone(), + ); + } + Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + peer_id: _, + id: RequestId::Sync(10), + response, + }) => { + debug!(log, "Sender received a response"); + match response { + Response::BlocksByRoot(Some(_)) => { + assert_eq!(response, rpc_response.clone()); + messages_received += 
1; + debug!(log, "Chunk received"); + } + Response::BlocksByRoot(None) => { + // should be exactly messages_to_send + assert_eq!(messages_received, messages_to_send); + // end the test + return; + } + _ => {} // Ignore other RPC messages } - _ => {} // Ignore other RPC messages } + _ => {} // Ignore other behaviour events } - _ => {} // Ignore other behaviour events } - } - }; - - // determine messages to send (PeerId, RequestId). If some, indicates we still need to send - // messages - let mut message_info = None; - // the number of messages we've sent - let mut messages_sent = 0; - let receiver_future = async { - loop { - // this future either drives the sending/receiving or times out allowing messages to be - // sent in the timeout - match futures::future::select( - Box::pin(receiver.next_event()), - tokio::time::delay_for(Duration::from_millis(1000)), - ) - .await - { - futures::future::Either::Left(( - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { - peer_id, - id, - request, - }), - _, - )) => { - if request == rpc_request { - // send the response - warn!(log, "Receiver got request"); - message_info = Some((peer_id, id)); + }; + + // determine messages to send (PeerId, RequestId). If some, indicates we still need to send + // messages + let mut message_info = None; + // the number of messages we've sent + let mut messages_sent = 0; + let receiver_future = async { + loop { + // this future either drives the sending/receiving or times out allowing messages to be + // sent in the timeout + match futures::future::select( + Box::pin(receiver.next_event()), + tokio::time::sleep(Duration::from_millis(1000)), + ) + .await + { + futures::future::Either::Left(( + Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + peer_id, + id, + request, + }), + _, + )) => { + if request == rpc_request { + // send the response + warn!(log, "Receiver got request"); + message_info = Some((peer_id, id)); + } } + futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required + _ => continue, } - futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required - _ => continue, - } - // if we need to send messages send them here. This will happen after a delay - if message_info.is_some() { - messages_sent += 1; - let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.swarm.send_successful_response( - peer_id.clone(), - stream_id.clone(), - rpc_response.clone(), - ); - debug!(log, "Sending message {}", messages_sent); - if messages_sent == messages_to_send + extra_messages_to_send { - // stop sending messages - return; + // if we need to send messages send them here. This will happen after a delay + if message_info.is_some() { + messages_sent += 1; + let (peer_id, stream_id) = message_info.as_ref().unwrap(); + receiver.swarm.send_successful_response( + peer_id.clone(), + stream_id.clone(), + rpc_response.clone(), + ); + debug!(log, "Sending message {}", messages_sent); + if messages_sent == messages_to_send + extra_messages_to_send { + // stop sending messages + return; + } } } - } - }; + }; - tokio::select! { - _ = sender_future => {} - _ = receiver_future => {} - _ = delay_for(Duration::from_secs(30)) => { - panic!("Future timed out"); + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } } - } + }) } -#[tokio::test] // Tests a Goodbye RPC message -async fn test_goodbye_rpc() { +#[test] +fn test_goodbye_rpc() { // set up the logging. 
The level and enabled logging or not let log_level = Level::Trace; let enable_logging = false; let log = common::build_log(log_level, enable_logging); + let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(&log).await; - - // build the sender future - let sender_future = async { - loop { - match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { - // Send a goodbye and disconnect - debug!(log, "Sending RPC"); - sender - .swarm - .goodbye_peer(&peer_id, GoodbyeReason::IrrelevantNetwork); - } - Libp2pEvent::Behaviour(BehaviourEvent::PeerDisconnected(_)) => { - return; + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + + // build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + // Send a goodbye and disconnect + debug!(log, "Sending RPC"); + sender + .swarm + .goodbye_peer(&peer_id, GoodbyeReason::IrrelevantNetwork); + } + Libp2pEvent::Behaviour(BehaviourEvent::PeerDisconnected(_)) => { + return; + } + _ => {} // Ignore other RPC messages } - _ => {} // Ignore other RPC messages } - } - }; - - // build the receiver future - let receiver_future = async { - loop { - match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDisconnected(_)) => { - // Should receive sent RPC request - return; + }; + + // build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::PeerDisconnected(_)) => { + // Should receive sent RPC request + return; + } + _ => {} // Ignore other events } - _ => {} // Ignore other events } - } - }; + }; - let total_future = futures::future::join(sender_future, receiver_future); + let total_future = futures::future::join(sender_future, receiver_future); - tokio::select! { - _ = total_future => {} - _ = delay_for(Duration::from_secs(30)) => { - panic!("Future timed out"); + tokio::select! 
{ + _ = total_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } } - } + }) } diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 2150754eab9..383c79d904a 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -6,9 +6,10 @@ edition = "2018" [dev-dependencies] eth1_test_rig = { path = "../../testing/eth1_test_rig" } +tokio-compat-02 = "0.1" [dependencies] -futures = "0.3.5" +futures = "0.3.7" types = { path = "../../consensus/types"} environment = { path = "../../lighthouse/environment"} eth1 = { path = "../eth1"} @@ -18,7 +19,7 @@ merkle_proof = { path = "../../consensus/merkle_proof" } eth2_ssz = "0.1.2" eth2_hashing = "0.1.0" tree_hash = "0.1.1" -tokio = { version = "0.2.22", features = ["full"] } +tokio = { version = "0.3.2", features = ["full"] } parking_lot = "0.11.0" slog = "2.5.2" exit-future = "0.2.0" diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index e16e577af72..77c936198e9 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -12,7 +12,7 @@ use std::sync::{ Arc, }; use std::time::Duration; -use tokio::time::delay_for; +use tokio::time::sleep; use types::{BeaconState, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256}; /// The number of blocks that are pulled per request whilst waiting for genesis. @@ -149,7 +149,7 @@ impl Eth1GenesisService { "valid_deposits" => eth1_service.get_raw_valid_signature_count(), ); - delay_for(update_interval).await; + sleep(update_interval).await; continue; } @@ -229,9 +229,9 @@ impl Eth1GenesisService { // We assume that if we imported a large chunk of blocks then we're some distance from // the head and we should sync faster. 
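A minimal sketch of the timer migration this file makes: tokio 0.3 renames `tokio::time::delay_for` to `tokio::time::sleep`, while the call shape stays the same. The names `poll_until_genesis`, `import_blocks`, and `BLOCKS_PER_POLL` below are illustrative stand-ins, not the crate's actual items.

use std::time::Duration;
use tokio::time::sleep;

// Hypothetical threshold standing in for BLOCKS_PER_GENESIS_POLL.
const BLOCKS_PER_POLL: usize = 64;

async fn poll_until_genesis(update_interval: Duration) {
    loop {
        let blocks_imported = import_blocks().await;
        if blocks_imported >= BLOCKS_PER_POLL {
            // A large import suggests we are far behind the eth1 head: poll again quickly.
            sleep(Duration::from_millis(50)).await;
        } else {
            // Otherwise wait the normal update interval before the next poll.
            sleep(update_interval).await;
        }
    }
}

async fn import_blocks() -> usize {
    // Placeholder for the real block-import step.
    0
}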
if blocks_imported >= BLOCKS_PER_GENESIS_POLL { - delay_for(Duration::from_millis(50)).await; + sleep(Duration::from_millis(50)).await; } else { - delay_for(update_interval).await; + sleep(update_interval).await; } } } diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index feab02e4129..a197649bf4d 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -9,11 +9,12 @@ use futures::compat::Future01CompatExt; use genesis::{Eth1Config, Eth1GenesisService}; use state_processing::is_valid_genesis_state; use std::time::Duration; +use tokio_compat_02::FutureExt; use types::{test_utils::generate_deterministic_keypair, Hash256, MinimalEthSpec}; pub fn new_env() -> Environment { EnvironmentBuilder::minimal() - .single_thread_tokio_runtime() + .multi_threaded_tokio_runtime() .expect("should start tokio runtime") .null_logger() .expect("should start null logger") @@ -27,83 +28,86 @@ fn basic() { let log = env.core_context().log().clone(); let mut spec = env.eth2_config().spec.clone(); - env.runtime().block_on(async { - let eth1 = GanacheEth1Instance::new() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + env.runtime().block_on( + async { + let eth1 = GanacheEth1Instance::new() + .await + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); - let now = web3 - .eth() - .block_number() - .compat() - .await - .map(|v| v.as_u64()) - .expect("should get block number"); + let now = web3 + .eth() + .block_number() + .compat() + .await + .map(|v| v.as_u64()) + .expect("should get block number"); - let service = Eth1GenesisService::new( - Eth1Config { - endpoint: eth1.endpoint(), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: now, - lowest_cached_block_number: now, - follow_distance: 0, - block_cache_truncation: None, - ..Eth1Config::default() - }, - log, - spec.clone(), - ); + let service = Eth1GenesisService::new( + Eth1Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + deposit_contract_deploy_block: now, + lowest_cached_block_number: now, + follow_distance: 0, + block_cache_truncation: None, + ..Eth1Config::default() + }, + log, + spec.clone(), + ); - // NOTE: this test is sensitive to the response speed of the external web3 server. If - // you're experiencing failures, try increasing the update_interval. - let update_interval = Duration::from_millis(500); + // NOTE: this test is sensitive to the response speed of the external web3 server. If + // you're experiencing failures, try increasing the update_interval. 
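The genesis test wraps its async block in `tokio_compat_02::FutureExt::compat()` (the `.compat()` call at the end of the block), which provides a tokio 0.2 reactor context for libraries, such as the web3/eth1 test rig here, that have not yet moved to tokio 0.3. A minimal sketch of that bridge, with `legacy_request` as a hypothetical 0.2-based call:

use tokio_compat_02::FutureExt;

// Stand-in for a call into a library that still expects a tokio 0.2 runtime.
async fn legacy_request() -> u64 {
    42
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn compat_bridge_example() {
    // The outer runtime is tokio 0.3; `.compat()` supplies the 0.2 context the
    // wrapped future expects, so both worlds coexist during the migration.
    let value = async { legacy_request().await }.compat().await;
    assert_eq!(value, 42);
}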
+ let update_interval = Duration::from_millis(500); - spec.min_genesis_time = 0; - spec.min_genesis_active_validator_count = 8; + spec.min_genesis_time = 0; + spec.min_genesis_active_validator_count = 8; - let deposits = (0..spec.min_genesis_active_validator_count + 2) - .map(|i| { - deposit_contract.deposit_helper::( - generate_deterministic_keypair(i as usize), - Hash256::from_low_u64_le(i), - 32_000_000_000, - ) - }) - .map(|deposit| DelayThenDeposit { - delay: Duration::from_secs(0), - deposit, - }) - .collect::>(); + let deposits = (0..spec.min_genesis_active_validator_count + 2) + .map(|i| { + deposit_contract.deposit_helper::( + generate_deterministic_keypair(i as usize), + Hash256::from_low_u64_le(i), + 32_000_000_000, + ) + }) + .map(|deposit| DelayThenDeposit { + delay: Duration::from_secs(0), + deposit, + }) + .collect::>(); - let deposit_future = deposit_contract.deposit_multiple(deposits); + let deposit_future = deposit_contract.deposit_multiple(deposits); - let wait_future = - service.wait_for_genesis_state::(update_interval, spec.clone()); + let wait_future = + service.wait_for_genesis_state::(update_interval, spec.clone()); - let state = futures::try_join!(deposit_future, wait_future) - .map(|(_, state)| state) - .expect("should finish waiting for genesis"); + let state = futures::try_join!(deposit_future, wait_future) + .map(|(_, state)| state) + .expect("should finish waiting for genesis"); - // Note: using ganache these deposits are 1-per-block, therefore we know there should only be - // the minimum number of validators. - assert_eq!( - state.validators.len(), - spec.min_genesis_active_validator_count as usize, - "should have expected validator count" - ); + // Note: using ganache these deposits are 1-per-block, therefore we know there should only be + // the minimum number of validators. 
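A condensed sketch of the `futures::try_join!` pattern used in this test: the deposit submission and the genesis wait run concurrently, the combined future fails fast if either errors, and the state comes from the second output. The two async functions are stand-ins for the real test-rig calls:

use futures::try_join;

async fn submit_deposits() -> Result<(), String> {
    Ok(())
}

async fn wait_for_genesis() -> Result<String, String> {
    Ok("genesis_state".to_string())
}

async fn deposit_then_wait() -> Result<String, String> {
    // Drive both futures concurrently; `?` propagates the first error from either.
    let ((), state) = try_join!(submit_deposits(), wait_for_genesis())?;
    Ok(state)
}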
+ assert_eq!( + state.validators.len(), + spec.min_genesis_active_validator_count as usize, + "should have expected validator count" + ); - assert!(state.genesis_time > 0, "should have some genesis time"); + assert!(state.genesis_time > 0, "should have some genesis time"); - assert!( - is_valid_genesis_state(&state, &spec), - "should be valid genesis state" - ); + assert!( + is_valid_genesis_state(&state, &spec), + "should be valid genesis state" + ); - assert!( - is_valid_genesis_state(&state, &spec), - "should be valid genesis state" - ); - }); + assert!( + is_valid_genesis_state(&state, &spec), + "should be valid genesis state" + ); + } + .compat(), + ); } diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index ca4a8a18391..d8784ea5c39 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -5,9 +5,9 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -warp = { git = "https://github.com/paulhauner/warp", branch = "cors-wildcard" } +warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" } serde = { version = "1.0.116", features = ["derive"] } -tokio = { version = "0.2.22", features = ["macros"] } +tokio = { version = "0.3.2", features = ["macros"] } parking_lot = "0.11.0" types = { path = "../../consensus/types" } hex = "0.4.2" @@ -31,4 +31,5 @@ bs58 = "0.3.1" store = { path = "../store" } environment = { path = "../../lighthouse/environment" } tree_hash = "0.1.1" -discv5 = { git = "https://github.com/sigp/discv5", rev = "fba7ceb5cfebd219ebbad6ffdb5d8c31dc8e4bc0", features = ["libp2p"] } +discv5 = { git = "https://github.com/sigp/discv5", rev = "f117b3ca56fa3dca2317270434634ff7106d391a", features = ["libp2p"] } +tokio-compat-02 = "0.1" diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 3568353970f..d37cbfb76fb 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -17,10 +17,7 @@ use beacon_chain::{ }; use beacon_proposer_cache::BeaconProposerCache; use block_id::BlockId; -use eth2::{ - types::{self as api_types, ValidatorId}, - StatusCode, -}; +use eth2::types::{self as api_types, ValidatorId}; use eth2_libp2p::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use network::NetworkMessage; @@ -42,6 +39,7 @@ use types::{ Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, RelativeEpoch, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig, }; +use warp::http::StatusCode; use warp::{http::Response, Filter}; use warp_utils::task::{blocking_json_task, blocking_task}; @@ -2251,12 +2249,14 @@ pub fn serve( .map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform())) .with(cors_builder.build()); - let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown( - SocketAddrV4::new(config.listen_addr, config.listen_port), - async { - shutdown.await; - }, - )?; + let (listening_socket, server) = { + warp::serve(routes).try_bind_with_graceful_shutdown( + SocketAddrV4::new(config.listen_addr, config.listen_port), + async { + shutdown.await; + }, + )? 
+ }; info!( log, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 987b659e2b3..430a243d03c 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -7,6 +7,7 @@ use beacon_chain::{ use discv5::enr::{CombinedKey, EnrBuilder}; use environment::null_logger; use eth2::Error; +use eth2::StatusCode; use eth2::{types::*, BeaconNodeHttpClient, Url}; use eth2_libp2p::{ rpc::methods::MetaData, @@ -21,12 +22,12 @@ use std::net::Ipv4Addr; use std::sync::Arc; use tokio::sync::mpsc; use tokio::sync::oneshot; +use tokio_compat_02::FutureExt; use tree_hash::TreeHash; use types::{ test_utils::generate_deterministic_keypairs, AggregateSignature, BeaconState, BitList, Domain, EthSpec, Hash256, Keypair, MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, }; -use warp::http::StatusCode; type E = MainnetEthSpec; @@ -1825,277 +1826,337 @@ impl ApiTester { } } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_get() { - ApiTester::new() - .test_beacon_genesis() - .await - .test_beacon_states_root() - .await - .test_beacon_states_fork() - .await - .test_beacon_states_finality_checkpoints() - .await - .test_beacon_states_validators() - .await - .test_beacon_states_validator_balances() - .await - .test_beacon_states_committees() - .await - .test_beacon_states_validator_id() - .await - .test_beacon_headers_all_slots() - .await - .test_beacon_headers_all_parents() - .await - .test_beacon_headers_block_id() - .await - .test_beacon_blocks() - .await - .test_beacon_blocks_attestations() - .await - .test_beacon_blocks_root() - .await - .test_get_beacon_pool_attestations() - .await - .test_get_beacon_pool_attester_slashings() - .await - .test_get_beacon_pool_proposer_slashings() - .await - .test_get_beacon_pool_voluntary_exits() - .await; + async { + ApiTester::new() + .test_beacon_genesis() + .await + .test_beacon_states_root() + .await + .test_beacon_states_fork() + .await + .test_beacon_states_finality_checkpoints() + .await + .test_beacon_states_validators() + .await + .test_beacon_states_validator_balances() + .await + .test_beacon_states_committees() + .await + .test_beacon_states_validator_id() + .await + .test_beacon_headers_all_slots() + .await + .test_beacon_headers_all_parents() + .await + .test_beacon_headers_block_id() + .await + .test_beacon_blocks() + .await + .test_beacon_blocks_attestations() + .await + .test_beacon_blocks_root() + .await + .test_get_beacon_pool_attestations() + .await + .test_get_beacon_pool_attester_slashings() + .await + .test_get_beacon_pool_proposer_slashings() + .await + .test_get_beacon_pool_voluntary_exits() + .await; + } + .compat() + .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_beacon_blocks_valid() { - ApiTester::new().test_post_beacon_blocks_valid().await; + ApiTester::new() + .test_post_beacon_blocks_valid() + .compat() + .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_beacon_blocks_invalid() { - ApiTester::new().test_post_beacon_blocks_invalid().await; + ApiTester::new() + .test_post_beacon_blocks_invalid() + .compat() + .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_pools_post_attestations_valid() { ApiTester::new() .test_post_beacon_pool_attestations_valid() + .compat() .await; } -#[tokio::test(core_threads 
= 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_pools_post_attestations_invalid() { ApiTester::new() .test_post_beacon_pool_attestations_invalid() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_pools_post_attester_slashings_valid() { ApiTester::new() .test_post_beacon_pool_attester_slashings_valid() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_pools_post_attester_slashings_invalid() { ApiTester::new() .test_post_beacon_pool_attester_slashings_invalid() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_pools_post_proposer_slashings_valid() { ApiTester::new() .test_post_beacon_pool_proposer_slashings_valid() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_pools_post_proposer_slashings_invalid() { ApiTester::new() .test_post_beacon_pool_proposer_slashings_invalid() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_pools_post_voluntary_exits_valid() { ApiTester::new() .test_post_beacon_pool_voluntary_exits_valid() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_pools_post_voluntary_exits_invalid() { ApiTester::new() .test_post_beacon_pool_voluntary_exits_invalid() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn config_get() { ApiTester::new() .test_get_config_fork_schedule() + .compat() .await .test_get_config_spec() + .compat() .await .test_get_config_deposit_contract() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn debug_get() { ApiTester::new() .test_get_debug_beacon_states() + .compat() .await .test_get_debug_beacon_heads() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn node_get() { ApiTester::new() .test_get_node_version() + .compat() .await .test_get_node_syncing() + .compat() .await .test_get_node_identity() + .compat() .await .test_get_node_health() + .compat() .await .test_get_node_peers_by_id() + .compat() .await .test_get_node_peers() + .compat() .await .test_get_node_peer_count() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_duties_attester() { - ApiTester::new().test_get_validator_duties_attester().await; + ApiTester::new() + .test_get_validator_duties_attester() + .compat() + .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_duties_attester_with_skip_slots() { ApiTester::new() .skip_slots(E::slots_per_epoch() * 2) .test_get_validator_duties_attester() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_duties_proposer() { - ApiTester::new().test_get_validator_duties_proposer().await; + ApiTester::new() + .test_get_validator_duties_proposer() + .compat() + .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] 
async fn get_validator_duties_proposer_with_skip_slots() { ApiTester::new() .skip_slots(E::slots_per_epoch() * 2) .test_get_validator_duties_proposer() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn block_production() { - ApiTester::new().test_block_production().await; + ApiTester::new().test_block_production().compat().await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn block_production_with_skip_slots() { ApiTester::new() .skip_slots(E::slots_per_epoch() * 2) .test_block_production() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_attestation_data() { - ApiTester::new().test_get_validator_attestation_data().await; + ApiTester::new() + .test_get_validator_attestation_data() + .compat() + .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_attestation_data_with_skip_slots() { ApiTester::new() .skip_slots(E::slots_per_epoch() * 2) .test_get_validator_attestation_data() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_aggregate_attestation() { ApiTester::new() .test_get_validator_aggregate_attestation() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_aggregate_attestation_with_skip_slots() { ApiTester::new() .skip_slots(E::slots_per_epoch() * 2) .test_get_validator_aggregate_attestation() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_aggregate_and_proofs_valid() { ApiTester::new() .test_get_validator_aggregate_and_proofs_valid() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_aggregate_and_proofs_valid_with_skip_slots() { ApiTester::new() .skip_slots(E::slots_per_epoch() * 2) .test_get_validator_aggregate_and_proofs_valid() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_aggregate_and_proofs_invalid() { ApiTester::new() .test_get_validator_aggregate_and_proofs_invalid() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots() { ApiTester::new() .skip_slots(E::slots_per_epoch() * 2) .test_get_validator_aggregate_and_proofs_invalid() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_beacon_committee_subscriptions() { ApiTester::new() .test_get_validator_beacon_committee_subscriptions() + .compat() .await; } -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() .test_get_lighthouse_health() + .compat() .await .test_get_lighthouse_syncing() + .compat() .await .test_get_lighthouse_proto_array() + .compat() .await .test_get_lighthouse_validator_inclusion() + .compat() .await .test_get_lighthouse_validator_inclusion_global() + .compat() .await .test_get_lighthouse_eth1_syncing() + .compat() .await .test_get_lighthouse_eth1_block_cache() + 
.compat() .await .test_get_lighthouse_eth1_deposit_cache() + .compat() .await .test_get_lighthouse_beacon_states_ssz() + .compat() .await .test_get_lighthouse_staking() + .compat() .await; } diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index 5b1715e689e..df11b99589c 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" [dependencies] prometheus = "0.10.0" -warp = { git = "https://github.com/paulhauner/warp", branch = "cors-wildcard" } +warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" } serde = { version = "1.0.116", features = ["derive"] } slog = "2.5.2" beacon_chain = { path = "../beacon_chain" } @@ -22,7 +22,8 @@ lighthouse_version = { path = "../../common/lighthouse_version" } warp_utils = { path = "../../common/warp_utils" } [dev-dependencies] -tokio = { version = "0.2.22", features = ["sync"] } +tokio = { version = "0.3.2", features = ["sync"] } reqwest = { version = "0.10.8", features = ["json"] } environment = { path = "../../lighthouse/environment" } types = { path = "../../consensus/types" } +tokio-compat-02 = "0.1" diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs index c537e7e4f41..cd0420f4bd1 100644 --- a/beacon_node/http_metrics/tests/tests.rs +++ b/beacon_node/http_metrics/tests/tests.rs @@ -5,42 +5,47 @@ use reqwest::StatusCode; use std::net::Ipv4Addr; use std::sync::Arc; use tokio::sync::oneshot; +use tokio_compat_02::FutureExt; use types::MainnetEthSpec; type Context = http_metrics::Context>; -#[tokio::test(core_threads = 2)] +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn returns_200_ok() { - let log = null_logger().unwrap(); + async { + let log = null_logger().unwrap(); - let context = Arc::new(Context { - config: Config { - enabled: true, - listen_addr: Ipv4Addr::new(127, 0, 0, 1), - listen_port: 0, - allow_origin: None, - }, - chain: None, - db_path: None, - freezer_db_path: None, - log, - }); + let context = Arc::new(Context { + config: Config { + enabled: true, + listen_addr: Ipv4Addr::new(127, 0, 0, 1), + listen_port: 0, + allow_origin: None, + }, + chain: None, + db_path: None, + freezer_db_path: None, + log, + }); - let ctx = context.clone(); - let (_shutdown_tx, shutdown_rx) = oneshot::channel::<()>(); - let server_shutdown = async { - // It's not really interesting why this triggered, just that it happened. - let _ = shutdown_rx.await; - }; - let (listening_socket, server) = http_metrics::serve(ctx, server_shutdown).unwrap(); + let ctx = context.clone(); + let (_shutdown_tx, shutdown_rx) = oneshot::channel::<()>(); + let server_shutdown = async { + // It's not really interesting why this triggered, just that it happened. 
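A minimal sketch of the oneshot-based graceful shutdown wired up in this test: the server is handed an async block that resolves when the receiver fires, whether by an explicit send or because the sender is dropped. `serve` below is a hypothetical stand-in for the real constructor:

use tokio::sync::oneshot;

// Pretend server that runs until its shutdown future resolves.
async fn serve(shutdown: impl std::future::Future<Output = ()>) {
    shutdown.await;
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn graceful_shutdown_example() {
    let (shutdown_tx, shutdown_rx) = oneshot::channel::<()>();
    let server = serve(async {
        // It's not interesting why the signal fired, only that it did.
        let _ = shutdown_rx.await;
    });
    let handle = tokio::spawn(server);

    // Trigger shutdown and wait for the server task to wind down.
    let _ = shutdown_tx.send(());
    handle.await.unwrap();
}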
+ let _ = shutdown_rx.await; + }; + let (listening_socket, server) = http_metrics::serve(ctx, server_shutdown).unwrap(); - tokio::spawn(async { server.await }); + tokio::spawn(async { server.await }); - let url = format!( - "http://{}:{}/metrics", - listening_socket.ip(), - listening_socket.port() - ); + let url = format!( + "http://{}:{}/metrics", + listening_socket.ip(), + listening_socket.port() + ); - assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK); + assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK); + } + .compat() + .await } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 3a55853a538..725fc937fee 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -11,6 +11,8 @@ lazy_static = "1.4.0" matches = "0.1.8" tempfile = "3.1.0" exit-future = "0.2.0" +slog-term = "2.6.0" +slog-async = "2.5.0" [dependencies] beacon_chain = { path = "../beacon_chain" } @@ -25,9 +27,9 @@ hex = "0.4.2" eth2_ssz = "0.1.2" eth2_ssz_types = { path = "../../consensus/ssz_types" } tree_hash = "0.1.1" -futures = "0.3.5" +futures = "0.3.7" error-chain = "0.12.4" -tokio = { version = "0.2.22", features = ["full"] } +tokio = { version = "0.3.2", features = ["full"] } parking_lot = "0.11.0" smallvec = "1.4.2" rand = "0.7.3" diff --git a/beacon_node/network/src/attestation_service/tests/mod.rs b/beacon_node/network/src/attestation_service/tests/mod.rs index 469bbf74753..e0d6da5d5d1 100644 --- a/beacon_node/network/src/attestation_service/tests/mod.rs +++ b/beacon_node/network/src/attestation_service/tests/mod.rs @@ -156,7 +156,7 @@ mod tests { tokio::select! { _ = collect_stream_fut => {return events} - _ = tokio::time::delay_for( + _ = tokio::time::sleep( Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout, ) => { return events; } } diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index d16722782d8..27a50803d75 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -233,6 +233,8 @@ impl Worker { | Err(e @ BlockError::BeaconChainError(_)) => { debug!(self.log, "Could not verify block for gossip, ignoring the block"; "error" => e.to_string()); + // Prevent recurring behaviour by penalizing the peer slightly. + self.penalize_peer(peer_id.clone(), PeerAction::HighToleranceError); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return; } @@ -511,6 +513,12 @@ impl Worker { "block" => %beacon_block_root, "type" => ?attestation_type, ); + + // Peers that are slow or not to spec can spam us with these messages draining our + // bandwidth. We therefore penalize these peers when they do this. + self.penalize_peer(peer_id.clone(), PeerAction::LowToleranceError); + + // Do not propagate these messages. self.propagate_validation_result( message_id, peer_id.clone(), @@ -618,7 +626,12 @@ impl Worker { "block" => %beacon_block_root, "type" => ?attestation_type, ); + // We still penalize the peer slightly. We don't want this to be a recurring + // behaviour. + self.penalize_peer(peer_id.clone(), PeerAction::HighToleranceError); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return; } AttnError::PriorAttestationKnown { .. 
} => { @@ -634,7 +647,12 @@ impl Worker { "block" => %beacon_block_root, "type" => ?attestation_type, ); + // We still penalize the peer slightly. We don't want this to be a recurring + // behaviour. + self.penalize_peer(peer_id.clone(), PeerAction::HighToleranceError); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return; } AttnError::ValidatorIndexTooHigh(_) => { @@ -677,6 +695,10 @@ impl Worker { "msg" => "UnknownBlockHash" ) }); + // We still penalize the peer slightly. We don't want this to be a recurring + // behaviour. + self.penalize_peer(peer_id.clone(), PeerAction::HighToleranceError); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return; } diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index f6aa777ab9b..31bad7a3441 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -3,15 +3,18 @@ extern crate lazy_static; /// This crate provides the network server for Lighthouse. pub mod error; +#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod service; mod attestation_service; mod beacon_processor; +#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy mod metrics; mod nat; mod persisted_dht; mod router; mod status; +#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy mod sync; pub use eth2_libp2p::NetworkConfig; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 8ccb76b5560..58c0305cfd1 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -1,5 +1,12 @@ use beacon_chain::attestation_verification::Error as AttnError; +use eth2_libp2p::PubsubMessage; +use eth2_libp2p::{ + types::GossipKind, BandwidthSinks, GossipTopic, Gossipsub, NetworkGlobals, TopicHash, +}; +use fnv::FnvHashMap; pub use lighthouse_metrics::*; +use std::{collections::HashMap, sync::Arc}; +use types::{subnet_id::subnet_id_to_string, EthSpec}; lazy_static! { @@ -404,6 +411,27 @@ lazy_static! { "gossipsub_attestation_error_beacon_chain_error", "Count of a specific error type (see metric name)" ); + + pub static ref INBOUND_LIBP2P_BYTES: Result = + try_create_int_gauge("libp2p_inbound_bytes", "The inbound bandwidth over libp2p"); + + pub static ref OUTBOUND_LIBP2P_BYTES: Result = try_create_int_gauge( + "libp2p_outbound_bytes", + "The outbound bandwidth over libp2p" + ); + pub static ref TOTAL_LIBP2P_BANDWIDTH: Result = try_create_int_gauge( + "libp2p_total_bandwidth", + "The total inbound/outbound bandwidth over libp2p" + ); +} + +pub fn update_bandwidth_metrics(bandwidth: Arc) { + set_gauge(&INBOUND_LIBP2P_BYTES, bandwidth.total_inbound() as i64); + set_gauge(&OUTBOUND_LIBP2P_BYTES, bandwidth.total_outbound() as i64); + set_gauge( + &TOTAL_LIBP2P_BANDWIDTH, + (bandwidth.total_inbound() + bandwidth.total_outbound()) as i64, + ); } lazy_static! { @@ -486,3 +514,359 @@ pub fn register_attestation_error(error: &AttnError) { AttnError::BeaconChainError(_) => inc_counter(&GOSSIP_ATTESTATION_ERROR_BEACON_CHAIN_ERROR), } } + +/// Inspects the `messages` that were being sent to the network and updates Prometheus metrics. 
+pub fn expose_publish_metrics(messages: &[PubsubMessage]) { + for message in messages { + match message { + PubsubMessage::BeaconBlock(_) => inc_counter(&GOSSIP_BLOCKS_TX), + PubsubMessage::Attestation(subnet_id) => { + inc_counter_vec( + &ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT, + &[&subnet_id.0.as_ref()], + ); + inc_counter(&GOSSIP_UNAGGREGATED_ATTESTATIONS_TX) + } + PubsubMessage::AggregateAndProofAttestation(_) => { + inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_TX) + } + _ => {} + } + } +} + +/// Inspects a `message` received from the network and updates Prometheus metrics. +pub fn expose_receive_metrics(message: &PubsubMessage) { + match message { + PubsubMessage::BeaconBlock(_) => inc_counter(&GOSSIP_BLOCKS_RX), + PubsubMessage::Attestation(_) => inc_counter(&GOSSIP_UNAGGREGATED_ATTESTATIONS_RX), + PubsubMessage::AggregateAndProofAttestation(_) => { + inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_RX) + } + _ => {} + } +} + +pub fn update_gossip_metrics( + gossipsub: &Gossipsub, + network_globals: &Arc>, +) { + // Clear the metrics + let _ = PEERS_PER_PROTOCOL.as_ref().map(|gauge| gauge.reset()); + let _ = PEERS_PER_PROTOCOL.as_ref().map(|gauge| gauge.reset()); + let _ = MESH_PEERS_PER_MAIN_TOPIC + .as_ref() + .map(|gauge| gauge.reset()); + let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC + .as_ref() + .map(|gauge| gauge.reset()); + let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC + .as_ref() + .map(|gauge| gauge.reset()); + + let _ = SCORES_BELOW_ZERO_PER_CLIENT + .as_ref() + .map(|gauge| gauge.reset()); + let _ = SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT + .as_ref() + .map(|gauge| gauge.reset()); + let _ = SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT + .as_ref() + .map(|gauge| gauge.reset()); + let _ = SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT + .as_ref() + .map(|gauge| gauge.reset()); + let _ = MIN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); + let _ = MEDIAN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); + let _ = MEAN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); + let _ = MAX_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); + + let _ = BEACON_BLOCK_MESH_PEERS_PER_CLIENT + .as_ref() + .map(|gauge| gauge.reset()); + let _ = BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT + .as_ref() + .map(|gauge| gauge.reset()); + + // reset the mesh peers, showing all subnets + for subnet_id in 0..T::default_spec().attestation_subnet_count { + let _ = get_int_gauge( + &MESH_PEERS_PER_SUBNET_TOPIC, + &[subnet_id_to_string(subnet_id)], + ) + .map(|v| v.set(0)); + + let _ = get_int_gauge( + &GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC, + &[subnet_id_to_string(subnet_id)], + ) + .map(|v| v.set(0)); + + let _ = get_int_gauge( + &GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC, + &[subnet_id_to_string(subnet_id)], + ) + .map(|v| v.set(0)); + } + + // Subnet topics subscribed to + for topic_hash in gossipsub.topics() { + if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { + if let GossipKind::Attestation(subnet_id) = topic.kind() { + let _ = get_int_gauge( + &GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC, + &[subnet_id_to_string(subnet_id.into())], + ) + .map(|v| v.set(1)); + } + } + } + + // Peers per subscribed subnet + let mut peers_per_topic: HashMap = HashMap::new(); + for (peer_id, topics) in gossipsub.all_peers() { + for topic_hash in topics { + *peers_per_topic.entry(topic_hash.clone()).or_default() += 1; + + if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { + match topic.kind() { + GossipKind::Attestation(subnet_id) => { + if let Some(v) = get_int_gauge( + 
&GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC, + &[subnet_id_to_string(subnet_id.into())], + ) { + v.inc() + }; + + // average peer scores + if let Some(score) = gossipsub.peer_score(peer_id) { + if let Some(v) = get_gauge( + &AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC, + &[subnet_id_to_string(subnet_id.into())], + ) { + v.add(score) + }; + } + } + kind => { + // main topics + if let Some(score) = gossipsub.peer_score(peer_id) { + if let Some(v) = get_gauge( + &AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC, + &[kind.as_ref()], + ) { + v.add(score) + }; + } + } + } + } + } + } + // adjust to average scores by dividing by number of peers + for (topic_hash, peers) in peers_per_topic.iter() { + if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { + match topic.kind() { + GossipKind::Attestation(subnet_id) => { + // average peer scores + if let Some(v) = get_gauge( + &AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC, + &[subnet_id_to_string(subnet_id.into())], + ) { + v.set(v.get() / (*peers as f64)) + }; + } + kind => { + // main topics + if let Some(v) = + get_gauge(&AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC, &[kind.as_ref()]) + { + v.set(v.get() / (*peers as f64)) + }; + } + } + } + } + + // mesh peers + for topic_hash in gossipsub.topics() { + let peers = gossipsub.mesh_peers(&topic_hash).count(); + if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { + match topic.kind() { + GossipKind::Attestation(subnet_id) => { + if let Some(v) = get_int_gauge( + &MESH_PEERS_PER_SUBNET_TOPIC, + &[subnet_id_to_string(subnet_id.into())], + ) { + v.set(peers as i64) + }; + } + kind => { + // main topics + if let Some(v) = get_int_gauge(&MESH_PEERS_PER_MAIN_TOPIC, &[kind.as_ref()]) { + v.set(peers as i64) + }; + } + } + } + } + + // protocol peers + let mut peers_per_protocol: HashMap<&'static str, i64> = HashMap::new(); + for (_peer, protocol) in gossipsub.peer_protocol() { + *peers_per_protocol + .entry(protocol.as_static_ref()) + .or_default() += 1; + } + + for (protocol, peers) in peers_per_protocol.iter() { + if let Some(v) = get_int_gauge(&PEERS_PER_PROTOCOL, &[protocol]) { + v.set(*peers) + }; + } + + let mut peer_to_client = HashMap::new(); + let mut scores_per_client: HashMap<&'static str, Vec> = HashMap::new(); + { + let peers = network_globals.peers.read(); + for (peer_id, _) in gossipsub.all_peers() { + let client = peers + .peer_info(peer_id) + .map(|peer_info| peer_info.client.kind.as_static_ref()) + .unwrap_or_else(|| "Unknown"); + + peer_to_client.insert(peer_id, client); + let score = gossipsub.peer_score(peer_id).unwrap_or(0.0); + scores_per_client.entry(client).or_default().push(score); + } + } + + // mesh peers per client + for topic_hash in gossipsub.topics() { + if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { + match topic.kind() { + GossipKind::BeaconBlock => { + for peer in gossipsub.mesh_peers(&topic_hash) { + if let Some(client) = peer_to_client.get(peer) { + if let Some(v) = + get_int_gauge(&BEACON_BLOCK_MESH_PEERS_PER_CLIENT, &[client]) + { + v.inc() + }; + } + } + } + GossipKind::BeaconAggregateAndProof => { + for peer in gossipsub.mesh_peers(&topic_hash) { + if let Some(client) = peer_to_client.get(peer) { + if let Some(v) = get_int_gauge( + &BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT, + &[client], + ) { + v.inc() + }; + } + } + } + _ => (), + } + } + } + + for (client, scores) in scores_per_client.into_iter() { + let c = &[client]; + let len = scores.len(); + if len > 0 { + let mut below0 = 0; + let mut below_gossip_threshold = 0; + let mut 
below_publish_threshold = 0; + let mut below_greylist_threshold = 0; + let mut min = f64::INFINITY; + let mut sum = 0.0; + let mut max = f64::NEG_INFINITY; + + let count = scores.len() as f64; + + for &score in &scores { + if score < 0.0 { + below0 += 1; + } + if score < -4000.0 { + //TODO not hardcode + below_gossip_threshold += 1; + } + if score < -8000.0 { + //TODO not hardcode + below_publish_threshold += 1; + } + if score < -16000.0 { + //TODO not hardcode + below_greylist_threshold += 1; + } + if score < min { + min = score; + } + if score > max { + max = score; + } + sum += score; + } + + let median = if len == 0 { + 0.0 + } else if len % 2 == 0 { + (scores[len / 2 - 1] + scores[len / 2]) / 2.0 + } else { + scores[len / 2] + }; + + set_gauge_entry(&SCORES_BELOW_ZERO_PER_CLIENT, c, below0 as f64 / count); + set_gauge_entry( + &SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT, + c, + below_gossip_threshold as f64 / count, + ); + set_gauge_entry( + &SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT, + c, + below_publish_threshold as f64 / count, + ); + set_gauge_entry( + &SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT, + c, + below_greylist_threshold as f64 / count, + ); + + set_gauge_entry(&MIN_SCORES_PER_CLIENT, c, min); + set_gauge_entry(&MEDIAN_SCORES_PER_CLIENT, c, median); + set_gauge_entry(&MEAN_SCORES_PER_CLIENT, c, sum / count); + set_gauge_entry(&MAX_SCORES_PER_CLIENT, c, max); + } + } +} + +pub fn update_sync_metrics(network_globals: &Arc>) { + // reset the counts + if PEERS_PER_SYNC_TYPE + .as_ref() + .map(|metric| metric.reset()) + .is_err() + { + return; + }; + + // count per sync status, the number of connected peers + let mut peers_per_sync_type = FnvHashMap::default(); + for sync_type in network_globals + .peers + .read() + .connected_peers() + .map(|(_peer_id, info)| info.sync_status.as_str()) + { + *peers_per_sync_type.entry(sync_type).or_default() += 1; + } + + for (sync_type, peer_count) in peers_per_sync_type { + set_gauge_entry(&PEERS_PER_SYNC_TYPE, &[sync_type], peer_count); + } +} diff --git a/beacon_node/network/src/nat.rs b/beacon_node/network/src/nat.rs index 1b6520ef800..02444240dbb 100644 --- a/beacon_node/network/src/nat.rs +++ b/beacon_node/network/src/nat.rs @@ -5,7 +5,7 @@ use crate::{NetworkConfig, NetworkMessage}; use if_addrs::get_if_addrs; -use slog::{debug, info, warn}; +use slog::{debug, info}; use std::net::{IpAddr, SocketAddr, SocketAddrV4}; use tokio::sync::mpsc; use types::EthSpec; @@ -70,6 +70,8 @@ pub fn construct_upnp_mappings( Some(v) => v, }; + debug!(log, "UPnP Local IP Discovered"; "ip" => ?local_ip); + match local_ip { IpAddr::V4(address) => { let libp2p_socket = SocketAddrV4::new(address, config.tcp_port); @@ -78,53 +80,39 @@ pub fn construct_upnp_mappings( // one. // I've found this to be more reliable. If multiple users are behind a single // router, they should ideally try to set different port numbers. 
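The nat.rs hunk that follows replaces the two inlined `gateway.add_port` matches with a shared helper that retries once when the router reports the port as already mapped. A condensed sketch of that remap-on-`PortInUse` approach, assuming the `igd` crate's blocking gateway API:

use std::net::SocketAddrV4;

// Try to map `socket`, removing a stale mapping and retrying once on PortInUse.
fn map_port(
    gateway: &igd::Gateway,
    protocol: igd::PortMappingProtocol,
    socket: SocketAddrV4,
    description: &str,
) -> Result<(), ()> {
    for _ in 0..2 {
        match gateway.add_port(protocol, socket.port(), socket, 0, description) {
            Ok(_) => return Ok(()),
            Err(igd::AddPortError::PortInUse) => {
                // A previous (possibly crashed) run may have left the mapping behind;
                // remove it and let the loop try once more.
                gateway.remove_port(protocol, socket.port()).map_err(|_| ())?;
            }
            Err(_) => return Err(()),
        }
    }
    Err(())
}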
- let tcp_socket = match gateway.add_port( + let tcp_socket = add_port_mapping( + &gateway, igd::PortMappingProtocol::TCP, - libp2p_socket.port(), libp2p_socket, - 0, - "lighthouse-tcp", - ) { - Err(e) => { - info!(log, "UPnP TCP route not set"; "error" => %e); - None - } - Ok(_) => { - info!(log, "UPnP TCP route established"; "external_socket" => format!("{}:{}", external_ip.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.tcp_port)); - external_ip - .as_ref() - .map(|ip| SocketAddr::new(ip.clone().into(), config.tcp_port)) - .ok() - } - }; + "tcp", + &log, + ).and_then(|_| { + let external_socket = external_ip.as_ref().map(|ip| SocketAddr::new(ip.clone().into(), config.tcp_port)).map_err(|_| ()); + info!(log, "UPnP TCP route established"; "external_socket" => format!("{}:{}", external_socket.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.tcp_port)); + external_socket + }).ok(); let udp_socket = if !config.disable_discovery { let discovery_socket = SocketAddrV4::new(address, config.udp_port); - match gateway.add_port( + add_port_mapping( + &gateway, igd::PortMappingProtocol::UDP, - discovery_socket.port(), discovery_socket, - 0, - "lighthouse-udp", - ) { - Err(e) => { - info!(log, "UPnP UDP route not set"; "error" => %e); - None - } - Ok(_) => { - info!(log, "UPnP UDP route established"; "external_socket" => format!("{}:{}", external_ip.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.tcp_port)); - external_ip - .map(|ip| SocketAddr::new(ip.into(), config.tcp_port)) - .ok() - } - } + "udp", + &log, + ).and_then(|_| { + let external_socket = external_ip + .map(|ip| SocketAddr::new(ip.into(), config.udp_port)).map_err(|_| ()); + info!(log, "UPnP UDP route established"; "external_socket" => format!("{}:{}", external_socket.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.udp_port)); + external_socket + }).ok() } else { None }; // report any updates to the network service. network_send.send(NetworkMessage::UPnPMappingEstablished{ tcp_socket, udp_socket }) - .unwrap_or_else(|e| warn!(log, "Could not send message to the network service"; "error" => %e)); + .unwrap_or_else(|e| debug!(log, "Could not send message to the network service"; "error" => %e)); } _ => debug!(log, "UPnP no routes constructed. IPv6 not supported"), } @@ -132,6 +120,50 @@ pub fn construct_upnp_mappings( }; } +/// Sets up a port mapping for a protocol returning the mapped port if successful. +fn add_port_mapping( + gateway: &igd::Gateway, + protocol: igd::PortMappingProtocol, + socket: SocketAddrV4, + protocol_string: &'static str, + log: &slog::Logger, +) -> Result<(), ()> { + // We add specific port mappings rather than getting the router to arbitrary assign + // one. + // I've found this to be more reliable. If multiple users are behind a single + // router, they should ideally try to set different port numbers. 
+ let mapping_string = &format!("lighthouse-{}", protocol_string); + for _ in 0..2 { + match gateway.add_port(protocol, socket.port(), socket, 0, mapping_string) { + Err(e) => { + match e { + igd::AddPortError::PortInUse => { + // Try and remove and re-create + debug!(log, "UPnP port in use, attempting to remap"; "protocol" => protocol_string, "port" => socket.port()); + match gateway.remove_port(protocol, socket.port()) { + Ok(()) => { + debug!(log, "UPnP Removed port mapping"; "protocol" => protocol_string, "port" => socket.port()) + } + Err(e) => { + debug!(log, "UPnP Port remove failure"; "protocol" => protocol_string, "port" => socket.port(), "error" => %e); + return Err(()); + } + } + } + e => { + info!(log, "UPnP TCP route not set"; "error" => %e); + return Err(()); + } + } + } + Ok(_) => { + return Ok(()); + } + } + } + Err(()) +} + /// Removes the specified TCP and UDP port mappings. pub fn remove_mappings(tcp_port: Option, udp_port: Option, log: &slog::Logger) { if tcp_port.is_some() || udp_port.is_some() { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index f4d6532c927..a083a4af056 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -8,20 +8,16 @@ use crate::{error, metrics}; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2_libp2p::{ rpc::{GoodbyeReason, RPCResponseErrorCode, RequestId}, - Gossipsub, Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, Request, Response, -}; -use eth2_libp2p::{ - types::GossipKind, BehaviourEvent, GossipTopic, MessageId, NetworkGlobals, PeerId, TopicHash, + Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, Request, Response, }; +use eth2_libp2p::{types::GossipKind, BehaviourEvent, MessageId, NetworkGlobals, PeerId}; use eth2_libp2p::{MessageAcceptance, Service as LibP2PService}; -use fnv::FnvHashMap; use futures::prelude::*; use slog::{debug, error, info, o, trace, warn}; -use std::{collections::HashMap, net::SocketAddr, sync::Arc, time::Duration}; +use std::{net::SocketAddr, sync::Arc, time::Duration}; use store::HotColdDB; use tokio::sync::mpsc; -use tokio::time::Delay; -use types::subnet_id::subnet_id_to_string; +use tokio::time::Sleep; use types::{EthSpec, RelativeEpoch, SubnetId, Unsigned, ValidatorSubscription}; mod tests; @@ -111,7 +107,7 @@ pub struct NetworkService { /// update the UDP socket of discovery if the UPnP mappings get established. discovery_auto_update: bool, /// A delay that expires when a new fork takes place. - next_fork_update: Option, + next_fork_update: Option, /// Subscribe to all the subnets once synced. subscribe_all_subnets: bool, /// A timer for updating various network metrics. @@ -274,12 +270,12 @@ fn spawn_service( .as_ref() .map(|gauge| gauge.reset()); } - update_gossip_metrics::( + metrics::update_gossip_metrics::( &service.libp2p.swarm.gs(), &service.network_globals, ); // update sync metrics - update_sync_metrics(&service.network_globals); + metrics::update_sync_metrics(&service.network_globals); } _ = service.gossipsub_parameter_update.next() => { @@ -382,7 +378,7 @@ fn spawn_service( "count" => messages.len(), "topics" => format!("{:?}", topic_kinds) ); - expose_publish_metrics(&messages); + metrics::expose_publish_metrics(&messages); service.libp2p.swarm.publish(messages); } NetworkMessage::ReportPeer { peer_id, action } => service.libp2p.report_peer(&peer_id, action), @@ -512,7 +508,7 @@ fn spawn_service( .. } => { // Update prometheus metrics. 
- expose_receive_metrics(&message); + metrics::expose_receive_metrics(&message); match message { // attestation information gets processed in the attestation service PubsubMessage::Attestation(ref subnet_and_attestation) => { @@ -566,399 +562,22 @@ fn spawn_service( service.next_fork_update = next_fork_delay(&service.beacon_chain); } } + + metrics::update_bandwidth_metrics(service.libp2p.bandwidth.clone()); } }, "network"); Ok(()) } -/// Returns a `Delay` that triggers shortly after the next change in the beacon chain fork version. +/// Returns a `Sleep` that triggers shortly after the next change in the beacon chain fork version. /// If there is no scheduled fork, `None` is returned. fn next_fork_delay( beacon_chain: &BeaconChain, -) -> Option { +) -> Option { beacon_chain.duration_to_next_fork().map(|until_fork| { // Add a short time-out to start within the new fork period. let delay = Duration::from_millis(200); - tokio::time::delay_until(tokio::time::Instant::now() + until_fork + delay) + tokio::time::sleep_until(tokio::time::Instant::now() + until_fork + delay) }) } - -/// Inspects the `messages` that were being sent to the network and updates Prometheus metrics. -fn expose_publish_metrics(messages: &[PubsubMessage]) { - for message in messages { - match message { - PubsubMessage::BeaconBlock(_) => metrics::inc_counter(&metrics::GOSSIP_BLOCKS_TX), - PubsubMessage::Attestation(subnet_id) => { - metrics::inc_counter_vec( - &metrics::ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT, - &[&subnet_id.0.as_ref()], - ); - metrics::inc_counter(&metrics::GOSSIP_UNAGGREGATED_ATTESTATIONS_TX) - } - PubsubMessage::AggregateAndProofAttestation(_) => { - metrics::inc_counter(&metrics::GOSSIP_AGGREGATED_ATTESTATIONS_TX) - } - _ => {} - } - } -} - -/// Inspects a `message` received from the network and updates Prometheus metrics. 
-fn expose_receive_metrics(message: &PubsubMessage) { - match message { - PubsubMessage::BeaconBlock(_) => metrics::inc_counter(&metrics::GOSSIP_BLOCKS_RX), - PubsubMessage::Attestation(_) => { - metrics::inc_counter(&metrics::GOSSIP_UNAGGREGATED_ATTESTATIONS_RX) - } - PubsubMessage::AggregateAndProofAttestation(_) => { - metrics::inc_counter(&metrics::GOSSIP_AGGREGATED_ATTESTATIONS_RX) - } - _ => {} - } -} - -fn update_gossip_metrics( - gossipsub: &Gossipsub, - network_globals: &Arc>, -) { - // Clear the metrics - let _ = metrics::PEERS_PER_PROTOCOL - .as_ref() - .map(|gauge| gauge.reset()); - let _ = metrics::PEERS_PER_PROTOCOL - .as_ref() - .map(|gauge| gauge.reset()); - let _ = metrics::MESH_PEERS_PER_MAIN_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - let _ = metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - let _ = metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - - let _ = metrics::SCORES_BELOW_ZERO_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = metrics::SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = metrics::SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = metrics::SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = metrics::MIN_SCORES_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = metrics::MEDIAN_SCORES_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = metrics::MEAN_SCORES_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = metrics::MAX_SCORES_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - - let _ = metrics::BEACON_BLOCK_MESH_PEERS_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = metrics::BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - - // reset the mesh peers, showing all subnets - for subnet_id in 0..T::default_spec().attestation_subnet_count { - let _ = metrics::get_int_gauge( - &metrics::MESH_PEERS_PER_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = metrics::get_int_gauge( - &metrics::GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = metrics::get_int_gauge( - &metrics::GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - } - - // Subnet topics subscribed to - for topic_hash in gossipsub.topics() { - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - if let GossipKind::Attestation(subnet_id) = topic.kind() { - let _ = metrics::get_int_gauge( - &metrics::GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) - .map(|v| v.set(1)); - } - } - } - - // Peers per subscribed subnet - let mut peers_per_topic: HashMap = HashMap::new(); - for (peer_id, topics) in gossipsub.all_peers() { - for topic_hash in topics { - *peers_per_topic.entry(topic_hash.clone()).or_default() += 1; - - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::Attestation(subnet_id) => { - if let Some(v) = metrics::get_int_gauge( - &metrics::GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { - v.inc() - }; - - // average peer scores - if let Some(score) = gossipsub.peer_score(peer_id) { - if let Some(v) = metrics::get_gauge( - &metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC, - 
&[subnet_id_to_string(subnet_id.into())], - ) { - v.add(score) - }; - } - } - kind => { - // main topics - if let Some(score) = gossipsub.peer_score(peer_id) { - if let Some(v) = metrics::get_gauge( - &metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC, - &[kind.as_ref()], - ) { - v.add(score) - }; - } - } - } - } - } - } - // adjust to average scores by dividing by number of peers - for (topic_hash, peers) in peers_per_topic.iter() { - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::Attestation(subnet_id) => { - // average peer scores - if let Some(v) = metrics::get_gauge( - &metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { - v.set(v.get() / (*peers as f64)) - }; - } - kind => { - // main topics - if let Some(v) = metrics::get_gauge( - &metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC, - &[kind.as_ref()], - ) { - v.set(v.get() / (*peers as f64)) - }; - } - } - } - } - - // mesh peers - for topic_hash in gossipsub.topics() { - let peers = gossipsub.mesh_peers(&topic_hash).count(); - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::Attestation(subnet_id) => { - if let Some(v) = metrics::get_int_gauge( - &metrics::MESH_PEERS_PER_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { - v.set(peers as i64) - }; - } - kind => { - // main topics - if let Some(v) = metrics::get_int_gauge( - &metrics::MESH_PEERS_PER_MAIN_TOPIC, - &[kind.as_ref()], - ) { - v.set(peers as i64) - }; - } - } - } - } - - // protocol peers - let mut peers_per_protocol: HashMap<&'static str, i64> = HashMap::new(); - for (_peer, protocol) in gossipsub.peer_protocol() { - *peers_per_protocol - .entry(protocol.as_static_ref()) - .or_default() += 1; - } - - for (protocol, peers) in peers_per_protocol.iter() { - if let Some(v) = metrics::get_int_gauge(&metrics::PEERS_PER_PROTOCOL, &[protocol]) { - v.set(*peers) - }; - } - - let mut peer_to_client = HashMap::new(); - let mut scores_per_client: HashMap<&'static str, Vec> = HashMap::new(); - { - let peers = network_globals.peers.read(); - for (peer_id, _) in gossipsub.all_peers() { - let client = peers - .peer_info(peer_id) - .map(|peer_info| peer_info.client.kind.as_static_ref()) - .unwrap_or_else(|| "Unknown"); - - peer_to_client.insert(peer_id, client); - let score = gossipsub.peer_score(peer_id).unwrap_or(0.0); - scores_per_client.entry(client).or_default().push(score); - } - } - - // mesh peers per client - for topic_hash in gossipsub.topics() { - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::BeaconBlock => { - for peer in gossipsub.mesh_peers(&topic_hash) { - if let Some(client) = peer_to_client.get(peer) { - if let Some(v) = metrics::get_int_gauge( - &metrics::BEACON_BLOCK_MESH_PEERS_PER_CLIENT, - &[client], - ) { - v.inc() - }; - } - } - } - GossipKind::BeaconAggregateAndProof => { - for peer in gossipsub.mesh_peers(&topic_hash) { - if let Some(client) = peer_to_client.get(peer) { - if let Some(v) = metrics::get_int_gauge( - &metrics::BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT, - &[client], - ) { - v.inc() - }; - } - } - } - _ => (), - } - } - } - - for (client, scores) in scores_per_client.into_iter() { - let c = &[client]; - let len = scores.len(); - if len > 0 { - let mut below0 = 0; - let mut below_gossip_threshold = 0; - let mut below_publish_threshold = 0; - let mut below_greylist_threshold = 0; - let mut min = f64::INFINITY; - let mut sum = 0.0; - 
let mut max = f64::NEG_INFINITY; - - let count = scores.len() as f64; - - for &score in &scores { - if score < 0.0 { - below0 += 1; - } - if score < -4000.0 { - //TODO not hardcode - below_gossip_threshold += 1; - } - if score < -8000.0 { - //TODO not hardcode - below_publish_threshold += 1; - } - if score < -16000.0 { - //TODO not hardcode - below_greylist_threshold += 1; - } - if score < min { - min = score; - } - if score > max { - max = score; - } - sum += score; - } - - let median = if len == 0 { - 0.0 - } else if len % 2 == 0 { - (scores[len / 2 - 1] + scores[len / 2]) / 2.0 - } else { - scores[len / 2] - }; - - metrics::set_gauge_entry( - &metrics::SCORES_BELOW_ZERO_PER_CLIENT, - c, - below0 as f64 / count, - ); - metrics::set_gauge_entry( - &metrics::SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT, - c, - below_gossip_threshold as f64 / count, - ); - metrics::set_gauge_entry( - &metrics::SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT, - c, - below_publish_threshold as f64 / count, - ); - metrics::set_gauge_entry( - &metrics::SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT, - c, - below_greylist_threshold as f64 / count, - ); - - metrics::set_gauge_entry(&metrics::MIN_SCORES_PER_CLIENT, c, min); - metrics::set_gauge_entry(&metrics::MEDIAN_SCORES_PER_CLIENT, c, median); - metrics::set_gauge_entry(&metrics::MEAN_SCORES_PER_CLIENT, c, sum / count); - metrics::set_gauge_entry(&metrics::MAX_SCORES_PER_CLIENT, c, max); - } - } -} - -fn update_sync_metrics(network_globals: &Arc>) { - // reset the counts - if metrics::PEERS_PER_SYNC_TYPE - .as_ref() - .map(|metric| metric.reset()) - .is_err() - { - return; - }; - - // count per sync status, the number of connected peers - let mut peers_per_sync_type = FnvHashMap::default(); - for sync_type in network_globals - .peers - .read() - .connected_peers() - .map(|(_peer_id, info)| info.sync_status.as_str()) - { - *peers_per_sync_type.entry(sync_type).or_default() += 1; - } - - for (sync_type, peer_count) in peers_per_sync_type { - metrics::set_gauge_entry(&metrics::PEERS_PER_SYNC_TYPE, &[sync_type], peer_count); - } -} diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 9888dd784d7..43653f5f2d6 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -5,6 +5,7 @@ mod tests { use crate::{NetworkConfig, NetworkService}; use beacon_chain::test_utils::BeaconChainHarness; use eth2_libp2p::Enr; + //use slog::{o, Drain, Level, Logger}; use slog::Logger; use sloggers::{null::NullLoggerBuilder, Build}; use std::str::FromStr; @@ -14,6 +15,18 @@ mod tests { use types::{test_utils::generate_deterministic_keypairs, MinimalEthSpec}; fn get_logger() -> Logger { + /* For emitting logs during the tests + let drain = { + let decorator = slog_term::TermDecorator::new().build(); + let decorator = + logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).chan_size(2048).build(); + drain.filter_level(Level::Debug) + }; + + Logger::root(drain.fuse(), o!()) + */ let builder = NullLoggerBuilder; builder.build().expect("should build logger") } @@ -37,12 +50,12 @@ mod tests { let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap(); let enrs = vec![enr1, enr2]; - let runtime = Runtime::new().unwrap(); + 
let runtime = Arc::new(Runtime::new().unwrap()); let (signal, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new( - runtime.handle().clone(), + Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx, @@ -50,9 +63,10 @@ mod tests { let mut config = NetworkConfig::default(); config.libp2p_port = 21212; + config.upnp_enabled = false; config.discovery_port = 21212; config.boot_nodes_enr = enrs.clone(); - runtime.spawn(async move { + runtime.block_on(async move { // Create a new network service which implicitly gets dropped at the // end of the block. @@ -61,7 +75,9 @@ mod tests { .unwrap(); drop(signal); }); - runtime.shutdown_timeout(tokio::time::Duration::from_millis(300)); + + let raw_runtime = Arc::try_unwrap(runtime).unwrap(); + raw_runtime.shutdown_timeout(tokio::time::Duration::from_secs(10)); // Load the persisted dht from the store let persisted_enrs = load_dht(store); diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index a468871a58c..92c74f6d17b 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -72,10 +72,8 @@ impl ProductionBeaconNode { let client_genesis = client_config.genesis.clone(); let store_config = client_config.store.clone(); let log = context.log().clone(); - let db_path = client_config.create_db_path()?; let freezer_db_path_res = client_config.create_freezer_db_path(); - let executor = context.executor.clone(); let builder = ClientBuilder::new(context.eth_spec_instance.clone()) diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs index 7d860538f9b..011209a1437 100644 --- a/beacon_node/tests/test.rs +++ b/beacon_node/tests/test.rs @@ -1,11 +1,14 @@ #![cfg(test)] +//TODO: Drop compat library once reqwest and other libraries update to tokio 0.3 + use beacon_chain::StateSkipConfig; use node_test_rig::{ environment::{Environment, EnvironmentBuilder}, eth2::types::StateId, testing_client_config, LocalBeaconNode, }; +use tokio_compat_02::FutureExt; use types::{EthSpec, MinimalEthSpec, Slot}; fn env_builder() -> EnvironmentBuilder { @@ -26,18 +29,26 @@ fn build_node(env: &mut Environment) -> LocalBeaconNode { fn http_server_genesis_state() { let mut env = env_builder() .null_logger() + //.async_logger("debug", None) .expect("should build env logger") .multi_threaded_tokio_runtime() .expect("should start tokio runtime") .build() .expect("environment should build"); + // build a runtime guard + let node = build_node(&mut env); + let remote_node = node.remote_node().expect("should produce remote node"); let api_state = env .runtime() - .block_on(remote_node.get_debug_beacon_states(StateId::Slot(Slot::new(0)))) + .block_on( + remote_node + .get_debug_beacon_states(StateId::Slot(Slot::new(0))) + .compat(), + ) .expect("should fetch state from http api") .unwrap() .data; @@ -54,5 +65,6 @@ fn http_server_genesis_state() { api_state, db_state, "genesis state from api should match that from the DB" ); + env.fire_signal(); } diff --git a/beacon_node/timer/Cargo.toml b/beacon_node/timer/Cargo.toml index 6388a7ad8d9..07bfc8c0e3a 100644 --- a/beacon_node/timer/Cargo.toml +++ b/beacon_node/timer/Cargo.toml @@ -8,8 +8,8 @@ edition = "2018" beacon_chain = { path = "../beacon_chain" } types = { path = "../../consensus/types" } slot_clock = { path = "../../common/slot_clock" } -tokio = { version = "0.2.22", features = ["full"] } +tokio = { version = "0.3.2", features = ["full"] } slog = "2.5.2" parking_lot = "0.11.0" -futures = "0.3.5" +futures = "0.3.7" 
task_executor = { path = "../../common/task_executor" } diff --git a/beacon_node/websocket_server/Cargo.toml b/beacon_node/websocket_server/Cargo.toml index 8a10bdc5be6..c39c785b92b 100644 --- a/beacon_node/websocket_server/Cargo.toml +++ b/beacon_node/websocket_server/Cargo.toml @@ -7,11 +7,11 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -futures = "0.3.5" +futures = "0.3.7" serde = "1.0.116" serde_derive = "1.0.116" slog = "2.5.2" -tokio = { version = "0.2.22", features = ["full"] } +tokio = { version = "0.3.2", features = ["full"] } types = { path = "../../consensus/types" } ws = "0.9.1" task_executor = { path = "../../common/task_executor" } diff --git a/beacon_node/websocket_server/src/lib.rs b/beacon_node/websocket_server/src/lib.rs index 1eea57ae28c..82bbd261910 100644 --- a/beacon_node/websocket_server/src/lib.rs +++ b/beacon_node/websocket_server/src/lib.rs @@ -79,11 +79,11 @@ pub fn start_server( // Place a future on the handle that will shutdown the websocket server when the // application exits. - executor.runtime_handle().spawn(exit_future); - let log_inner = log.clone(); + executor.spawn(exit_future, "Websocket exit"); - let _ = std::thread::spawn(move || match server.run() { + let log_inner = log.clone(); + let server_future = move || match server.run() { Ok(_) => { debug!( log_inner, @@ -97,7 +97,9 @@ pub fn start_server( "error" => format!("{:?}", e) ); } - }); + }; + + executor.spawn_blocking(server_future, "Websocket server"); info!( log, diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 85020e4a312..bc3e80ad05c 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -13,12 +13,12 @@ eth2_testnet_config = { path = "../common/eth2_testnet_config" } eth2_ssz = "0.1.2" slog = "2.5.2" sloggers = "1.0.1" -tokio = "0.2.22" +tokio = "0.3.2" log = "0.4.11" slog-term = "2.6.0" logging = { path = "../common/logging" } slog-async = "2.5.0" slog-scope = "4.3.0" slog-stdlog = "4.0.0" -futures = "0.3.5" +futures = "0.3.7" hex = "0.4.2" diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index be3536f3ebc..6e883872806 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -59,8 +59,7 @@ pub fn run(matches: &ArgMatches<'_>, eth_spec_id: EthSpecId, debug_level: String fn main(matches: &ArgMatches<'_>, log: slog::Logger) -> Result<(), String> { // Builds a custom executor for the bootnode - let mut runtime = tokio::runtime::Builder::new() - .threaded_scheduler() + let runtime = tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .map_err(|e| format!("Failed to build runtime: {}", e))?; diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index 367948a4bf3..14f37113690 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -52,7 +52,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { } // start the server - if let Err(e) = discv5.start(config.listen_socket) { + if let Err(e) = discv5.start(config.listen_socket).await { slog::crit!(log, "Could not start discv5 server"; "error" => e.to_string()); return; } diff --git a/common/eth2_testnet_config/build.rs b/common/eth2_testnet_config/build.rs index 6070c3a13e3..697f63a2bf5 100644 --- a/common/eth2_testnet_config/build.rs +++ b/common/eth2_testnet_config/build.rs @@ -52,8 +52,11 @@ fn uncompress_state(testnet: &Eth2NetArchiveAndDirectory<'static>) -> Result<(), .map_err(|e| format!("Error writing file {:?}: {}", path, e))?; } else { // Create empty genesis.ssz if 
genesis is unknown - File::create(testnet.dir().join(GENESIS_FILE_NAME)) - .map_err(|e| format!("Failed to create {}: {}", GENESIS_FILE_NAME, e))?; + let genesis_file = testnet.dir().join(GENESIS_FILE_NAME); + if !genesis_file.exists() { + File::create(genesis_file) + .map_err(|e| format!("Failed to create {}: {}", GENESIS_FILE_NAME, e))?; + } } Ok(()) diff --git a/common/hashset_delay/Cargo.toml b/common/hashset_delay/Cargo.toml index b577b97d60c..cba88c5ecc5 100644 --- a/common/hashset_delay/Cargo.toml +++ b/common/hashset_delay/Cargo.toml @@ -5,8 +5,8 @@ authors = ["Sigma Prime "] edition = "2018" [dependencies] -futures = "0.3.5" -tokio = { version = "0.2.22", features = ["time"] } +futures = "0.3.7" +tokio-util = { version = "0.4.0", features = ["time"] } [dev-dependencies] -tokio = { version = "0.2.22", features = ["time", "rt-threaded", "macros"] } +tokio = { version = "0.3.2", features = ["time", "rt-multi-thread", "macros"] } diff --git a/common/hashset_delay/src/hashset_delay.rs b/common/hashset_delay/src/hashset_delay.rs index 5eff0f03cdd..052d71fe3bc 100644 --- a/common/hashset_delay/src/hashset_delay.rs +++ b/common/hashset_delay/src/hashset_delay.rs @@ -12,7 +12,7 @@ use std::{ task::{Context, Poll}, time::{Duration, Instant}, }; -use tokio::time::delay_queue::{self, DelayQueue}; +use tokio_util::time::delay_queue::{self, DelayQueue}; pub struct HashSetDelay where diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index a0996c7d03b..1c5efc67701 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -5,9 +5,10 @@ authors = ["Sigma Prime "] edition = "2018" [dependencies] -tokio = { version = "0.2.22", features = ["rt-threaded", "macros", "blocking"] } +tokio = { version = "0.3.2", features = ["rt"] } slog = "2.5.2" -futures = "0.3.5" +futures = "0.3.7" exit-future = "0.2.0" lazy_static = "1.4.0" lighthouse_metrics = { path = "../lighthouse_metrics" } +tokio-compat-02 = "0.1" diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index 9f819dd2a26..e5a263ae394 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -3,13 +3,15 @@ mod metrics; use futures::channel::mpsc::Sender; use futures::prelude::*; use slog::{debug, o, trace}; -use tokio::runtime::Handle; +use std::sync::Weak; +use tokio::runtime::Runtime; +use tokio_compat_02::FutureExt; /// A wrapper over a runtime handle which can spawn async and blocking tasks. #[derive(Clone)] pub struct TaskExecutor { /// The handle to the runtime on which tasks are spawned - handle: Handle, + runtime: Weak, /// The receiver exit future which on receiving shuts down the task exit: exit_future::Exit, /// Sender given to tasks, so that if they encounter a state in which execution cannot @@ -27,13 +29,13 @@ impl TaskExecutor { /// Note: this function is mainly useful in tests. A `TaskExecutor` should be normally obtained from /// a [`RuntimeContext`](struct.RuntimeContext.html) pub fn new( - handle: Handle, + runtime: Weak, exit: exit_future::Exit, log: slog::Logger, signal_tx: Sender<&'static str>, ) -> Self { Self { - handle, + runtime, exit, signal_tx, log, @@ -43,7 +45,7 @@ impl TaskExecutor { /// Clones the task executor adding a service name. 
pub fn clone_with_name(&self, service_name: String) -> Self { TaskExecutor { - handle: self.handle.clone(), + runtime: self.runtime.clone(), exit: self.exit.clone(), signal_tx: self.signal_tx.clone(), log: self.log.new(o!("service" => service_name)), @@ -61,7 +63,7 @@ impl TaskExecutor { if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) { // Task is shutdown before it completes if `exit` receives let int_gauge_1 = int_gauge.clone(); - let future = future::select(Box::pin(task), exit).then(move |either| { + let future = future::select(Box::pin(task.compat()), exit).then(move |either| { match either { future::Either::Left(_) => trace!(log, "Async task completed"; "task" => name), future::Either::Right(_) => { @@ -73,7 +75,11 @@ impl TaskExecutor { }); int_gauge.inc(); - self.handle.spawn(future); + if let Some(runtime) = self.runtime.upgrade() { + runtime.spawn(future); + } else { + debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + } } } @@ -93,13 +99,19 @@ impl TaskExecutor { ) { if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) { let int_gauge_1 = int_gauge.clone(); - let future = task.then(move |_| { - int_gauge_1.dec(); - futures::future::ready(()) - }); + let future = task + .then(move |_| { + int_gauge_1.dec(); + futures::future::ready(()) + }) + .compat(); int_gauge.inc(); - self.handle.spawn(future); + if let Some(runtime) = self.runtime.upgrade() { + runtime.spawn(future); + } else { + debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + } } } @@ -109,7 +121,6 @@ impl TaskExecutor { where F: FnOnce() + Send + 'static, { - let exit = self.exit.clone(); let log = self.log.clone(); if let Some(metric) = metrics::get_histogram(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]) { @@ -117,31 +128,128 @@ impl TaskExecutor { { let int_gauge_1 = int_gauge.clone(); let timer = metric.start_timer(); - let join_handle = self.handle.spawn_blocking(task); + let join_handle = if let Some(runtime) = self.runtime.upgrade() { + runtime.spawn_blocking(task) + } else { + debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + return; + }; - let future = future::select(join_handle, exit).then(move |either| { - match either { - future::Either::Left(_) => { - trace!(log, "Blocking task completed"; "task" => name) - } - future::Either::Right(_) => { - debug!(log, "Blocking task shutdown, exit received"; "task" => name) - } - } + let future = async move { + match join_handle.await { + Ok(_) => trace!(log, "Blocking task completed"; "task" => name), + Err(e) => debug!(log, "Blocking task failed"; "error" => %e), + }; timer.observe_duration(); int_gauge_1.dec(); - futures::future::ready(()) - }); + }; int_gauge.inc(); - self.handle.spawn(future); + if let Some(runtime) = self.runtime.upgrade() { + runtime.spawn(future); + } else { + debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + } + } + } + } + /// Spawn a future on the tokio runtime wrapped in an `exit_future::Exit` returning an optional + /// join handle to the future. + /// The task is canceled when the corresponding exit_future `Signal` is fired/dropped. + /// + /// This function generates prometheus metrics on number of tasks and task duration. 
+ pub fn spawn_handle( + &self, + task: impl Future + Send + 'static, + name: &'static str, + ) -> Option>> { + let exit = self.exit.clone(); + let log = self.log.clone(); + + if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) { + // Task is shutdown before it completes if `exit` receives + let int_gauge_1 = int_gauge.clone(); + let future = future::select(Box::pin(task), exit).then(move |either| { + let result = match either { + future::Either::Left((task, _)) => { + trace!(log, "Async task completed"; "task" => name); + Some(task) + } + future::Either::Right(_) => { + debug!(log, "Async task shutdown, exit received"; "task" => name); + None + } + }; + int_gauge_1.dec(); + futures::future::ready(result) + }); + + int_gauge.inc(); + if let Some(runtime) = self.runtime.upgrade() { + Some(runtime.spawn(future.compat())) + } else { + debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + None + } + } else { + None + } + } + + /// Spawn a blocking task on a dedicated tokio thread pool wrapped in an exit future returning + /// a join handle to the future. + /// If the runtime doesn't exist, this will return None. + /// The Future returned behaves like the standard JoinHandle which can return an error if the + /// task failed. + /// This function generates prometheus metrics on number of tasks and task duration. + pub fn spawn_blocking_handle( + &self, + task: F, + name: &'static str, + ) -> Option>> + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + let log = self.log.clone(); + + if let Some(metric) = metrics::get_histogram(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]) { + if let Some(int_gauge) = metrics::get_int_gauge(&metrics::BLOCKING_TASKS_COUNT, &[name]) + { + let int_gauge_1 = int_gauge; + let timer = metric.start_timer(); + let join_handle = if let Some(runtime) = self.runtime.upgrade() { + runtime.spawn_blocking(task) + } else { + debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + return None; + }; + + Some(async move { + let result = match join_handle.await { + Ok(result) => { + trace!(log, "Blocking task completed"; "task" => name); + Ok(result) + } + Err(e) => { + debug!(log, "Blocking task ended unexpectedly"; "error" => %e); + Err(e) + } + }; + timer.observe_duration(); + int_gauge_1.dec(); + result + }) + } else { + None } + } else { + None } } - /// Returns the underlying runtime handle. - pub fn runtime_handle(&self) -> Handle { - self.handle.clone() + pub fn runtime(&self) -> Weak { + self.runtime.clone() } /// Returns a copy of the `exit_future::Exit`. 
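The task_executor changes above all follow one pattern: the executor keeps only a Weak<tokio::runtime::Runtime> and upgrades it before every spawn, so that once the owning Arc is dropped during shutdown, spawning degrades to a logged no-op rather than a call on a dead handle. A minimal, self-contained sketch of that pattern (the WeakExecutor type and its names are illustrative only, not part of this diff):

use std::sync::{Arc, Weak};
use tokio::runtime::Runtime;

/// Illustrative executor that only borrows the runtime weakly, so dropping the
/// last `Arc<Runtime>` (e.g. during shutdown) stops new tasks from being spawned.
struct WeakExecutor {
    runtime: Weak<Runtime>,
}

impl WeakExecutor {
    fn spawn<F>(&self, task: F, name: &'static str)
    where
        F: std::future::Future<Output = ()> + Send + 'static,
    {
        // Upgrading fails once the owning `Arc<Runtime>` has been dropped,
        // which is the "runtime shutting down" case handled in the diff.
        match self.runtime.upgrade() {
            Some(runtime) => {
                runtime.spawn(task);
            }
            None => eprintln!("could not spawn {}: runtime is shutting down", name),
        }
    }
}

fn main() {
    let runtime = Arc::new(Runtime::new().expect("runtime should build"));
    let exec = WeakExecutor { runtime: Arc::downgrade(&runtime) };
    exec.spawn(async { /* do work */ }, "demo");
    // Dropping the Arc shuts the runtime down; later spawns become no-ops.
    drop(runtime);
    exec.spawn(async {}, "after-shutdown");
}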
diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index cf77cccd36f..84f25d5ae03 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -7,14 +7,14 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -warp = { git = "https://github.com/paulhauner/warp", branch = "cors-wildcard" } +warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" } eth2 = { path = "../eth2" } types = { path = "../../consensus/types" } beacon_chain = { path = "../../beacon_node/beacon_chain" } state_processing = { path = "../../consensus/state_processing" } safe_arith = { path = "../../consensus/safe_arith" } serde = { version = "1.0.116", features = ["derive"] } -tokio = { version = "0.2.22", features = ["sync"] } +tokio = { version = "0.3.2", features = ["sync"] } headers = "0.3.2" lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" diff --git a/common/warp_utils/src/task.rs b/common/warp_utils/src/task.rs index da4cf91be70..c3b3e86e2ed 100644 --- a/common/warp_utils/src/task.rs +++ b/common/warp_utils/src/task.rs @@ -1,19 +1,21 @@ use serde::Serialize; -/// Execute some task in a tokio "blocking thread". These threads are ideal for long-running -/// (blocking) tasks since they don't jam up the core executor. -pub async fn blocking_task(func: F) -> T +/// A convenience wrapper around `blocking_task`. +pub async fn blocking_task(func: F) -> Result where - F: Fn() -> T, + F: FnOnce() -> Result + Send + 'static, + T: Send + 'static, { - tokio::task::block_in_place(func) + tokio::task::spawn_blocking(func) + .await + .unwrap_or_else(|_| Err(warp::reject::reject())) // This should really be a 500 } /// A convenience wrapper around `blocking_task` for use with `warp` JSON responses. 
pub async fn blocking_json_task(func: F) -> Result where - F: Fn() -> Result, - T: Serialize, + F: FnOnce() -> Result + Send + 'static, + T: Serialize + Send + 'static, { blocking_task(func) .await diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index e77d76b69a8..99a93e343c9 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -20,7 +20,7 @@ types = { path = "../consensus/types" } state_processing = { path = "../consensus/state_processing" } eth2_ssz = "0.1.2" regex = "1.3.9" -futures = { version = "0.3.5", features = ["compat"] } +futures = { version = "0.3.7", features = ["compat"] } environment = { path = "../lighthouse/environment" } web3 = "0.11.0" eth2_testnet_config = { path = "../common/eth2_testnet_config" } @@ -28,7 +28,7 @@ dirs = "3.0.1" genesis = { path = "../beacon_node/genesis" } deposit_contract = { path = "../common/deposit_contract" } tree_hash = "0.1.1" -tokio = { version = "0.2.22", features = ["full"] } +tokio = { version = "0.3.2", features = ["full"] } clap_utils = { path = "../common/clap_utils" } eth2_libp2p = { path = "../beacon_node/eth2_libp2p" } validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] } @@ -36,3 +36,4 @@ rand = "0.7.3" eth2_keystore = { path = "../crypto/eth2_keystore" } lighthouse_version = { path = "../common/lighthouse_version" } directory = { path = "../common/directory" } +tokio-compat-02 = "0.1" diff --git a/lcli/src/deploy_deposit_contract.rs b/lcli/src/deploy_deposit_contract.rs index 71d22f59a24..33a385fb0cb 100644 --- a/lcli/src/deploy_deposit_contract.rs +++ b/lcli/src/deploy_deposit_contract.rs @@ -6,6 +6,7 @@ use deposit_contract::{ use environment::Environment; use futures::compat::Future01CompatExt; use std::path::PathBuf; +use tokio_compat_02::FutureExt; use types::EthSpec; use web3::{ contract::{Contract, Options}, @@ -14,7 +15,7 @@ use web3::{ Web3, }; -pub fn run(mut env: Environment, matches: &ArgMatches<'_>) -> Result<(), String> { +pub fn run(env: Environment, matches: &ArgMatches<'_>) -> Result<(), String> { let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, "eth1-ipc")?; let from_address: Address = clap_utils::parse_required(matches, "from-address")?; let confirmations: usize = clap_utils::parse_required(matches, "confirmations")?; @@ -30,38 +31,41 @@ pub fn run(mut env: Environment, matches: &ArgMatches<'_>) -> Res ) })?; - env.runtime().block_on(async { - // It's unlikely that this will be the _actual_ deployment block, however it'll be close - // enough to serve our purposes. - // - // We only need the deposit block to put a lower bound on the block number we need to search - // for deposit logs. - let deploy_block = web3 - .eth() - .block_number() - .compat() - .await - .map_err(|e| format!("Failed to get block number: {}", e))?; + env.runtime().block_on( + async { + // It's unlikely that this will be the _actual_ deployment block, however it'll be close + // enough to serve our purposes. + // + // We only need the deposit block to put a lower bound on the block number we need to search + // for deposit logs. + let deploy_block = web3 + .eth() + .block_number() + .compat() + .await + .map_err(|e| format!("Failed to get block number: {}", e))?; - let pending_contract = Contract::deploy(web3.eth(), &ABI) - .map_err(|e| format!("Unable to build contract deployer: {:?}", e))? 
- .confirmations(confirmations) - .options(Options { - gas: Some(U256::from(CONTRACT_DEPLOY_GAS)), - ..Options::default() - }) - .execute(bytecode, (), from_address) - .map_err(|e| format!("Unable to execute deployment: {:?}", e))?; + let pending_contract = Contract::deploy(web3.eth(), &ABI) + .map_err(|e| format!("Unable to build contract deployer: {:?}", e))? + .confirmations(confirmations) + .options(Options { + gas: Some(U256::from(CONTRACT_DEPLOY_GAS)), + ..Options::default() + }) + .execute(bytecode, (), from_address) + .map_err(|e| format!("Unable to execute deployment: {:?}", e))?; - let address = pending_contract - .compat() - .await - .map_err(|e| format!("Unable to await pending contract: {:?}", e))? - .address(); + let address = pending_contract + .compat() + .await + .map_err(|e| format!("Unable to await pending contract: {:?}", e))? + .address(); - println!("deposit_contract_address: {:?}", address); - println!("deposit_contract_deploy_block: {}", deploy_block); + println!("deposit_contract_address: {:?}", address); + println!("deposit_contract_deploy_block: {}", deploy_block); - Ok(()) - }) + Ok(()) + } + .compat(), + ) } diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index bd03cef5ed8..f2f50908988 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -5,6 +5,7 @@ use genesis::{Eth1Config, Eth1GenesisService}; use ssz::Encode; use std::path::PathBuf; use std::time::Duration; +use tokio_compat_02::FutureExt; use types::EthSpec; /// Interval between polling the eth1 node for genesis information. @@ -49,19 +50,22 @@ pub fn run(mut env: Environment, matches: &ArgMatches<'_>) -> Res let genesis_service = Eth1GenesisService::new(config, env.core_context().log().clone(), spec.clone()); - env.runtime().block_on(async { - let _ = genesis_service - .wait_for_genesis_state::(ETH1_GENESIS_UPDATE_INTERVAL, spec) - .await - .map(move |genesis_state| { - eth2_testnet_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes()); - eth2_testnet_config.force_write_to_file(testnet_dir) - }) - .map_err(|e| format!("Failed to find genesis: {}", e))?; + env.runtime().block_on( + async { + let _ = genesis_service + .wait_for_genesis_state::(ETH1_GENESIS_UPDATE_INTERVAL, spec) + .await + .map(move |genesis_state| { + eth2_testnet_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes()); + eth2_testnet_config.force_write_to_file(testnet_dir) + }) + .map_err(|e| format!("Failed to find genesis: {}", e))?; - info!("Starting service to produce genesis BeaconState from eth1"); - info!("Connecting to eth1 http endpoint: {}", endpoint); + info!("Starting service to produce genesis BeaconState from eth1"); + info!("Connecting to eth1 http endpoint: {}", endpoint); - Ok(()) - }) + Ok(()) + } + .compat(), + ) } diff --git a/lcli/src/refund_deposit_contract.rs b/lcli/src/refund_deposit_contract.rs index 0efa557ef10..89943776023 100644 --- a/lcli/src/refund_deposit_contract.rs +++ b/lcli/src/refund_deposit_contract.rs @@ -2,6 +2,7 @@ use clap::ArgMatches; use environment::Environment; use futures::compat::Future01CompatExt; use std::path::PathBuf; +use tokio_compat_02::FutureExt; use types::EthSpec; use web3::{ transports::Ipc, @@ -12,7 +13,7 @@ use web3::{ /// `keccak("steal()")[0..4]` pub const STEAL_FN_SIGNATURE: &[u8] = &[0xcf, 0x7a, 0x89, 0x65]; -pub fn run(mut env: Environment, matches: &ArgMatches<'_>) -> Result<(), String> { +pub fn run(env: Environment, matches: &ArgMatches<'_>) -> Result<(), String> { let eth1_ipc_path: PathBuf = 
clap_utils::parse_required(matches, "eth1-ipc")?; let from: Address = clap_utils::parse_required(matches, "from-address")?; let contract_address: Address = clap_utils::parse_required(matches, "contract-address")?; @@ -21,23 +22,26 @@ pub fn run(mut env: Environment, matches: &ArgMatches<'_>) -> Res Ipc::new(eth1_ipc_path).map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?; let web3 = Web3::new(transport); - env.runtime().block_on(async { - let _ = web3 - .eth() - .send_transaction(TransactionRequest { - from, - to: Some(contract_address), - gas: Some(U256::from(400_000)), - gas_price: None, - value: Some(U256::zero()), - data: Some(STEAL_FN_SIGNATURE.into()), - nonce: None, - condition: None, - }) - .compat() - .await - .map_err(|e| format!("Failed to call steal fn: {:?}", e))?; + env.runtime().block_on( + async { + let _ = web3 + .eth() + .send_transaction(TransactionRequest { + from, + to: Some(contract_address), + gas: Some(U256::from(400_000)), + gas_price: None, + value: Some(U256::zero()), + data: Some(STEAL_FN_SIGNATURE.into()), + nonce: None, + condition: None, + }) + .compat() + .await + .map_err(|e| format!("Failed to call steal fn: {:?}", e))?; - Ok(()) - }) + Ok(()) + } + .compat(), + ) } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 6fec5c080a5..0ae7cefc3c8 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -16,7 +16,7 @@ milagro = ["bls/milagro"] [dependencies] beacon_node = { "path" = "../beacon_node" } -tokio = "0.2.22" +tokio = "0.3.2" slog = { version = "2.5.2", features = ["max_level_trace"] } sloggers = "1.0.1" types = { "path" = "../consensus/types" } @@ -28,7 +28,7 @@ slog-term = "2.6.0" slog-async = "2.5.0" environment = { path = "./environment" } boot_node = { path = "../boot_node" } -futures = "0.3.5" +futures = "0.3.7" validator_client = { "path" = "../validator_client" } account_manager = { "path" = "../account_manager" } clap_utils = { path = "../common/clap_utils" } @@ -37,6 +37,7 @@ directory = { path = "../common/directory" } lighthouse_version = { path = "../common/lighthouse_version" } account_utils = { path = "../common/account_utils" } remote_signer = { "path" = "../remote_signer" } +tokio-compat-02 = "0.1" [dev-dependencies] tempfile = "3.1.0" diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index 7bb3faa6ad8..832505807de 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -tokio = { version = "0.2.22", features = ["macros"] } +tokio = { version = "0.3.2", features = ["macros", "rt", "rt-multi-thread" ] } slog = { version = "2.5.2", features = ["max_level_trace"] } sloggers = "1.0.1" types = { "path" = "../../consensus/types" } @@ -16,7 +16,7 @@ logging = { path = "../../common/logging" } slog-term = "2.6.0" slog-async = "2.5.0" ctrlc = { version = "3.1.6", features = ["termination"] } -futures = "0.3.5" +futures = "0.3.7" parking_lot = "0.11.0" slog-json = "2.3.0" exit-future = "0.2.0" diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index f6801801d1c..64f2a916eb5 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -15,12 +15,13 @@ use futures::channel::{ }; use futures::{future, StreamExt}; -use slog::{error, info, o, Drain, Level, Logger}; +use slog::{error, info, o, warn, Drain, Level, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use std::cell::RefCell; use std::ffi::OsStr; use 
std::fs::{rename as FsRename, OpenOptions}; use std::path::PathBuf; +use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; @@ -29,11 +30,11 @@ use types::{EthSpec, MainnetEthSpec, MinimalEthSpec, V012LegacyEthSpec}; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; const LOG_CHANNEL_SIZE: usize = 2048; /// The maximum time in seconds the client will wait for all internal tasks to shutdown. -const MAXIMUM_SHUTDOWN_TIME: u64 = 3; +const MAXIMUM_SHUTDOWN_TIME: u64 = 15; /// Builds an `Environment`. pub struct EnvironmentBuilder { - runtime: Option, + runtime: Option>, log: Option, eth_spec_instance: E, eth2_config: Eth2Config, @@ -84,28 +85,12 @@ impl EnvironmentBuilder { /// /// The `Runtime` used is just the standard tokio runtime. pub fn multi_threaded_tokio_runtime(mut self) -> Result { - self.runtime = Some( - RuntimeBuilder::new() - .threaded_scheduler() + self.runtime = Some(Arc::new( + RuntimeBuilder::new_multi_thread() .enable_all() .build() .map_err(|e| format!("Failed to start runtime: {:?}", e))?, - ); - Ok(self) - } - - /// Specifies that a single-threaded tokio runtime should be used. Ideal for testing purposes - /// where tests are already multi-threaded. - /// - /// This can solve problems if "too many open files" errors are thrown during tests. - pub fn single_thread_tokio_runtime(mut self) -> Result { - self.runtime = Some( - RuntimeBuilder::new() - .basic_scheduler() - .enable_all() - .build() - .map_err(|e| format!("Failed to start runtime: {:?}", e))?, - ); + )); Ok(self) } @@ -329,7 +314,7 @@ impl RuntimeContext { /// An environment where Lighthouse services can run. Used to start a production beacon node or /// validator client, or to run tests that involve logging and async task execution. pub struct Environment { - runtime: Runtime, + runtime: Arc, /// Receiver side of an internal shutdown signal. signal_rx: Option>, /// Sender to request shutting down. @@ -347,15 +332,15 @@ impl Environment { /// /// Useful in the rare scenarios where it's necessary to block the current thread until a task /// is finished (e.g., during testing). - pub fn runtime(&mut self) -> &mut Runtime { - &mut self.runtime + pub fn runtime(&self) -> &Arc { + &self.runtime } /// Returns a `Context` where no "service" has been added to the logger output. pub fn core_context(&mut self) -> RuntimeContext { RuntimeContext { executor: TaskExecutor::new( - self.runtime().handle().clone(), + Arc::downgrade(self.runtime()), self.exit.clone(), self.log.clone(), self.signal_tx.clone(), @@ -369,7 +354,7 @@ impl Environment { pub fn service_context(&mut self, service_name: String) -> RuntimeContext { RuntimeContext { executor: TaskExecutor::new( - self.runtime().handle().clone(), + Arc::downgrade(self.runtime()), self.exit.clone(), self.log.new(o!("service" => service_name)), self.signal_tx.clone(), @@ -425,8 +410,16 @@ impl Environment { /// Shutdown the `tokio` runtime when all tasks are idle. 
pub fn shutdown_on_idle(self) { - self.runtime - .shutdown_timeout(std::time::Duration::from_secs(MAXIMUM_SHUTDOWN_TIME)) + match Arc::try_unwrap(self.runtime) { + Ok(runtime) => { + runtime.shutdown_timeout(std::time::Duration::from_secs(MAXIMUM_SHUTDOWN_TIME)) + } + Err(e) => warn!( + self.log, + "Failed to obtain runtime access to shutdown gracefully"; + "error" => ?e + ), + } } /// Fire exit signal which shuts down all spawned services diff --git a/lighthouse/environment/tests/environment_builder.rs b/lighthouse/environment/tests/environment_builder.rs index ef8d7206b92..439da974dbc 100644 --- a/lighthouse/environment/tests/environment_builder.rs +++ b/lighthouse/environment/tests/environment_builder.rs @@ -7,7 +7,7 @@ use types::{V012LegacyEthSpec, YamlConfig}; fn builder() -> EnvironmentBuilder { EnvironmentBuilder::v012_legacy() - .single_thread_tokio_runtime() + .multi_threaded_tokio_runtime() .expect("should set runtime") .null_logger() .expect("should set logger") diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index c3e49faf411..d8148f2e2de 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -7,6 +7,7 @@ use lighthouse_version::VERSION; use slog::{crit, info, warn}; use std::path::PathBuf; use std::process::exit; +use tokio_compat_02::FutureExt; use types::{EthSpec, EthSpecId}; use validator_client::ProductionValidatorClient; @@ -280,16 +281,19 @@ fn run( &context.eth2_config().spec, context.log().clone(), )?; - environment.runtime().spawn(async move { - if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await { - crit!(log, "Failed to start beacon node"; "reason" => e); - // Ignore the error since it always occurs during normal operation when - // shutting down. - let _ = executor - .shutdown_sender() - .try_send("Failed to start beacon node"); + environment.runtime().spawn( + async move { + if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await { + crit!(log, "Failed to start beacon node"; "reason" => e); + // Ignore the error since it always occurs during normal operation when + // shutting down. + let _ = executor + .shutdown_sender() + .try_send("Failed to start beacon node"); + } } - }); + .compat(), + ); } ("validator_client", Some(matches)) => { let context = environment.core_context(); @@ -297,23 +301,26 @@ fn run( let executor = context.executor.clone(); let config = validator_client::Config::from_cli(&matches, context.log()) .map_err(|e| format!("Unable to initialize validator config: {}", e))?; - environment.runtime().spawn(async move { - let run = async { - ProductionValidatorClient::new(context, config) - .await? - .start_service()?; - - Ok::<(), String>(()) - }; - if let Err(e) = run.await { - crit!(log, "Failed to start validator client"; "reason" => e); - // Ignore the error since it always occurs during normal operation when - // shutting down. - let _ = executor - .shutdown_sender() - .try_send("Failed to start validator client"); + environment.runtime().spawn( + async move { + let run = async { + ProductionValidatorClient::new(context, config) + .await? + .start_service()?; + + Ok::<(), String>(()) + }; + if let Err(e) = run.await { + crit!(log, "Failed to start validator client"; "reason" => e); + // Ignore the error since it always occurs during normal operation when + // shutting down. 
+ let _ = executor + .shutdown_sender() + .try_send("Failed to start validator client"); + } } - }); + .compat(), + ); } ("remote_signer", Some(matches)) => { if let Err(e) = remote_signer::run(&mut environment, matches) { diff --git a/remote_signer/client/Cargo.toml b/remote_signer/client/Cargo.toml index dac014790e3..ff6ad10a25b 100644 --- a/remote_signer/client/Cargo.toml +++ b/remote_signer/client/Cargo.toml @@ -9,7 +9,7 @@ clap = "2.33.3" client_backend = { path = "../backend", package = "remote_signer_backend" } environment = { path = "../../lighthouse/environment" } futures = "0.3.6" -hyper = "0.13.8" +hyper = { git = "https://github.com/sigp/hyper", branch = "lighthouse" } lazy_static = "1.4.0" regex = "1.3.9" serde = { version = "1.0.116", features = ["derive"] } diff --git a/remote_signer/client/src/handler.rs b/remote_signer/client/src/handler.rs index ea5b64ba754..50390c559be 100644 --- a/remote_signer/client/src/handler.rs +++ b/remote_signer/client/src/handler.rs @@ -58,16 +58,14 @@ impl Handler { let (req_parts, _) = self.req.into_parts(); let req = Request::from_parts(req_parts, body); + // NOTE: The task executor now holds a weak reference to the global runtime. On shutdown + // there may be no runtime available. + // All these edge cases must be handled here. let value = executor - .runtime_handle() - .spawn_blocking(move || func(req, ctx)) + .spawn_blocking_handle(move || func(req, ctx), "remote_signer_request") + .ok_or_else(|| ApiError::ServerError("Runtime does not exist".to_string()))? .await - .map_err(|e| { - ApiError::ServerError(format!( - "Failed to get blocking join handle: {}", - e.to_string() - )) - })??; + .map_err(|_| ApiError::ServerError("Panic during execution".to_string()))??; Ok(HandledRequest { value }) } diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index d6a203b6675..d538d56c187 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -5,9 +5,9 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -tokio = { version = "0.2.22", features = ["time"] } +tokio = { version = "0.3.2", features = ["time"] } web3 = "0.11.0" -futures = { version = "0.3.5", features = ["compat"] } +futures = { version = "0.3.7", features = ["compat"] } types = { path = "../../consensus/types"} serde_json = "1.0.58" deposit_contract = { path = "../../common/deposit_contract"} diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs index 53715133bf5..64f26dce3b7 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -13,7 +13,7 @@ use deposit_contract::{ use futures::compat::Future01CompatExt; use ganache::GanacheInstance; use std::time::Duration; -use tokio::time::delay_for; +use tokio::time::sleep; use types::DepositData; use types::{test_utils::generate_deterministic_keypair, EthSpec, Hash256, Keypair, Signature}; use web3::contract::{Contract, Options}; @@ -220,7 +220,7 @@ impl DepositContract { /// Peforms many deposits, each preceded by a delay. 
pub async fn deposit_multiple(&self, deposits: Vec) -> Result<(), String> { for deposit in deposits.into_iter() { - delay_for(deposit.delay).await; + sleep(deposit.delay).await; self.deposit_async(deposit.deposit).await?; } Ok(()) diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index cfbd92620d7..5bbaa729c10 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -13,7 +13,7 @@ tempdir = "0.3.7" reqwest = { version = "0.10.8", features = ["native-tls-vendored"] } url = "2.1.1" serde = "1.0.116" -futures = "0.3.5" +futures = "0.3.7" genesis = { path = "../../beacon_node/genesis" } eth2 = { path = "../../common/eth2" } validator_client = { path = "../../validator_client" } diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 2c2a0e81fea..4b1b0bec443 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -86,6 +86,7 @@ pub fn testing_client_config() -> ClientConfig { // Setting ports to `0` means that the OS will choose some available port. client_config.network.libp2p_port = 0; client_config.network.discovery_port = 0; + client_config.network.upnp_enabled = false; client_config.http_api.enabled = true; client_config.http_api.listen_port = 0; client_config.websocket_server.enabled = true; diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 22174ba7f6b..0ca9e9bfd45 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -12,9 +12,10 @@ eth1 = {path = "../../beacon_node/eth1"} types = { path = "../../consensus/types" } validator_client = { path = "../../validator_client" } parking_lot = "0.11.0" -futures = "0.3.5" -tokio = "0.2.22" +futures = "0.3.7" +tokio = "0.3.2" eth1_test_rig = { path = "../eth1_test_rig" } env_logger = "0.7.1" clap = "2.33.3" rayon = "1.4.1" +tokio-compat-02 = "0.1" diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index e755c9005fa..236d356633a 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -46,13 +46,13 @@ pub async fn verify_first_finalization( /// Delays for `epochs`, plus half a slot extra. pub async fn epoch_delay(epochs: Epoch, slot_duration: Duration, slots_per_epoch: u64) { let duration = slot_duration * (epochs.as_u64() * slots_per_epoch) as u32 + slot_duration / 2; - tokio::time::delay_for(duration).await + tokio::time::sleep(duration).await } /// Delays for `slots`, plus half a slot extra. 
async fn slot_delay(slots: Slot, slot_duration: Duration) { let duration = slot_duration * slots.as_u64() as u32 + slot_duration / 2; - tokio::time::delay_for(duration).await; + tokio::time::sleep(duration).await; } /// Verifies that all beacon nodes in the given network have a head state that has a finalized diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 33fb3a2b6ce..ded98dffd86 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -194,6 +194,12 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { Ok::<(), String>(()) }; - env.runtime().block_on(main_future).unwrap(); + env.runtime() + .block_on(tokio_compat_02::FutureExt::compat(main_future)) + .unwrap(); + + env.fire_signal(); + env.shutdown_on_idle(); + Ok(()) } diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 45292b23bbe..cccf413ed16 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -8,7 +8,7 @@ use node_test_rig::{ use rayon::prelude::*; use std::net::{IpAddr, Ipv4Addr}; use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use tokio::time::{delay_until, Instant}; +use tokio::time::{sleep_until, Instant}; use types::{Epoch, EthSpec, MainnetEthSpec}; pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { @@ -111,7 +111,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { * The processes that will run checks on the network as it runs. */ let checks_fut = async { - delay_until(genesis_instant).await; + sleep_until(genesis_instant).await; let (finalization, block_prod) = futures::join!( // Check that the chain finalizes at the first given opportunity. @@ -156,6 +156,11 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { Ok::<(), String>(()) }; - env.runtime().block_on(main_future).unwrap(); + env.runtime() + .block_on(tokio_compat_02::FutureExt::compat(main_future)) + .unwrap(); + + env.fire_signal(); + env.shutdown_on_idle(); Ok(()) } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 3dd8aa80036..6491a2bb7fb 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -9,9 +9,10 @@ name = "validator_client" path = "src/lib.rs" [dev-dependencies] -tokio = { version = "0.2.22", features = ["time", "rt-threaded", "macros"] } +tokio = { version = "0.3.2", features = ["time", "rt-multi-thread", "macros"] } tempfile = "3.1.0" deposit_contract = { path = "../common/deposit_contract" } +tokio-compat-02 = "0.1" [dependencies] eth2_ssz = "0.1.2" @@ -30,8 +31,8 @@ serde_yaml = "0.8.13" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } slog-async = "2.5.0" slog-term = "2.6.0" -tokio = { version = "0.2.22", features = ["time"] } -futures = { version = "0.3.5", features = ["compat"] } +tokio = { version = "0.3.2", features = ["time"] } +futures = { version = "0.3.7", features = ["compat"] } dirs = "3.0.1" directory = { path = "../common/directory" } lockfile = { path = "../common/lockfile" } @@ -53,7 +54,7 @@ eth2_keystore = { path = "../crypto/eth2_keystore" } account_utils = { path = "../common/account_utils" } lighthouse_version = { path = "../common/lighthouse_version" } warp_utils = { path = "../common/warp_utils" } -warp = { git = "https://github.com/paulhauner/warp", branch = "cors-wildcard" } +warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" } hyper = "0.13.8" serde_utils = { path = 
"../consensus/serde_utils" } libsecp256k1 = "0.3.5" diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index fa4457eb10c..e9931c164e1 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -5,13 +5,14 @@ use crate::{ }; use environment::RuntimeContext; use eth2::BeaconNodeHttpClient; +use futures::future::FutureExt; use futures::StreamExt; use slog::{crit, error, info, trace}; use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; use std::sync::Arc; -use tokio::time::{delay_until, interval_at, Duration, Instant}; +use tokio::time::{interval_at, sleep_until, Duration, Instant}; use tree_hash::TreeHash; use types::{ AggregateSignature, Attestation, AttestationData, BitList, ChainSpec, CommitteeIndex, EthSpec, @@ -211,13 +212,16 @@ impl AttestationService { .into_iter() .for_each(|(committee_index, validator_duties)| { // Spawn a separate task for each attestation. - self.inner.context.executor.runtime_handle().spawn( - self.clone().publish_attestations_and_aggregates( - slot, - committee_index, - validator_duties, - aggregate_production_instant, - ), + self.inner.context.executor.spawn( + self.clone() + .publish_attestations_and_aggregates( + slot, + committee_index, + validator_duties, + aggregate_production_instant, + ) + .map(|_| ()), + "attestation publish", ); }); @@ -278,7 +282,7 @@ impl AttestationService { // of the way though the slot). As verified in the // `delay_triggers_when_in_the_past` test, this code will still run // even if the instant has already elapsed. - delay_until(aggregate_production_instant).await; + sleep_until(aggregate_production_instant).await; // Start the metrics timer *after* we've done the delay. let _aggregates_timer = metrics::start_timer_vec( @@ -552,7 +556,7 @@ mod tests { use futures::future::FutureExt; use parking_lot::RwLock; - /// This test is to ensure that a `tokio_timer::Delay` with an instant in the past will still + /// This test is to ensure that a `tokio_timer::Sleep` with an instant in the past will still /// trigger. 
#[tokio::test] async fn delay_triggers_when_in_the_past() { @@ -560,7 +564,7 @@ mod tests { let state_1 = Arc::new(RwLock::new(in_the_past)); let state_2 = state_1.clone(); - delay_until(in_the_past) + sleep_until(in_the_past) .map(move |()| *state_1.write() = Instant::now()) .await; diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 2eb3c70b5b2..324d34ba6a1 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -188,21 +188,22 @@ impl BlockService { ) } - proposers.into_iter().for_each(|validator_pubkey| { + for validator_pubkey in proposers { let service = self.clone(); let log = log.clone(); - self.inner.context.executor.runtime_handle().spawn( + self.inner.context.executor.spawn( service .publish_block(slot, validator_pubkey) - .map_err(move |e| { + .unwrap_or_else(move |e| { crit!( log, "Error whilst producing block"; "message" => e - ) + ); }), + "block service", ); - }); + } Ok(()) } diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index b266f5d387c..781a6f5fb73 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -481,15 +481,14 @@ impl DutiesService { let duties_service = self.clone(); let mut block_service_tx_clone = block_service_tx.clone(); let inner_spec = spec.clone(); - self.inner - .context - .executor - .runtime_handle() - .spawn(async move { + self.inner.context.executor.spawn( + async move { duties_service .do_update(&mut block_service_tx_clone, &inner_spec) .await - }); + }, + "duties update", + ); let executor = self.inner.context.executor.clone(); diff --git a/validator_client/src/fork_service.rs b/validator_client/src/fork_service.rs index 7af171b6734..0fca6fb6412 100644 --- a/validator_client/src/fork_service.rs +++ b/validator_client/src/fork_service.rs @@ -1,6 +1,7 @@ use crate::http_metrics::metrics; use environment::RuntimeContext; use eth2::{types::StateId, BeaconNodeHttpClient}; +use futures::future::FutureExt; use futures::StreamExt; use parking_lot::RwLock; use slog::Logger; @@ -144,8 +145,7 @@ impl ForkService { // Run an immediate update before starting the updater service. context .executor - .runtime_handle() - .spawn(self.clone().do_update()); + .spawn(self.clone().do_update().map(|_| ()), "fork service update"); let executor = context.executor.clone(); diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs index a186d4e9843..bc4f21561f8 100644 --- a/validator_client/src/http_api/create_validator.rs +++ b/validator_client/src/http_api/create_validator.rs @@ -21,7 +21,7 @@ use validator_dir::Builder as ValidatorDirBuilder; /// /// If `key_derivation_path_offset` is supplied then the EIP-2334 validator index will start at /// this point. 
-pub fn create_validators, T: 'static + SlotClock, E: EthSpec>( +pub async fn create_validators, T: 'static + SlotClock, E: EthSpec>( mnemonic_opt: Option, key_derivation_path_offset: Option, validator_requests: &[api_types::ValidatorRequest], @@ -129,12 +129,9 @@ pub fn create_validators, T: 'static + SlotClock, E: EthSpec>( let voting_keystore_path = validator_dir.voting_keystore_path(); drop(validator_dir); - tokio::runtime::Handle::current() - .block_on(validator_store.add_validator_keystore( - voting_keystore_path, - voting_password_string, - request.enable, - )) + validator_store + .add_validator_keystore(voting_keystore_path, voting_password_string, request.enable) + .await .map_err(|e| { warp_utils::reject::custom_server_error(format!( "failed to initialize validator: {:?}", diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 9172cee5bf6..a75cc97036f 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -14,7 +14,8 @@ use std::future::Future; use std::marker::PhantomData; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::path::PathBuf; -use std::sync::Arc; +use std::sync::{Arc, Weak}; +use tokio::runtime::Runtime; use types::{ChainSpec, EthSpec, YamlConfig}; use validator_dir::Builder as ValidatorDirBuilder; use warp::{ @@ -50,6 +51,7 @@ impl From for Error { /// /// The server will gracefully handle the case where any fields are `None`. pub struct Context { + pub runtime: Weak, pub api_secret: ApiSecret, pub validator_store: Option>, pub validator_dir: Option, @@ -138,6 +140,9 @@ pub fn serve( }) }); + let inner_runtime = ctx.runtime.clone(); + let runtime_filter = warp::any().map(move || inner_runtime.clone()); + let inner_validator_dir = ctx.validator_dir.clone(); let validator_dir_filter = warp::any() .map(move || inner_validator_dir.clone()) @@ -258,26 +263,34 @@ pub fn serve( .and(validator_store_filter.clone()) .and(spec_filter.clone()) .and(signer.clone()) + .and(runtime_filter.clone()) .and_then( |body: Vec, validator_dir: PathBuf, validator_store: ValidatorStore, spec: Arc, - signer| { + signer, + runtime: Weak| { blocking_signed_json_task(signer, move || { - let (validators, mnemonic) = create_validators( - None, - None, - &body, - &validator_dir, - &validator_store, - &spec, - )?; - let response = api_types::PostValidatorsResponseData { - mnemonic: mnemonic.into_phrase().into(), - validators, - }; - Ok(api_types::GenericResponse::from(response)) + if let Some(runtime) = runtime.upgrade() { + let (validators, mnemonic) = runtime.block_on(create_validators( + None, + None, + &body, + &validator_dir, + &validator_store, + &spec, + ))?; + let response = api_types::PostValidatorsResponseData { + mnemonic: mnemonic.into_phrase().into(), + validators, + }; + Ok(api_types::GenericResponse::from(response)) + } else { + Err(warp_utils::reject::custom_server_error( + "Runtime shutdown".into(), + )) + } }) }, ); @@ -292,25 +305,37 @@ pub fn serve( .and(validator_store_filter.clone()) .and(spec_filter) .and(signer.clone()) + .and(runtime_filter.clone()) .and_then( |body: api_types::CreateValidatorsMnemonicRequest, validator_dir: PathBuf, validator_store: ValidatorStore, spec: Arc, - signer| { + signer, + runtime: Weak| { blocking_signed_json_task(signer, move || { - let mnemonic = mnemonic_from_phrase(body.mnemonic.as_str()).map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid mnemonic: {:?}", e)) - })?; - let (validators, _mnemonic) = create_validators( - 
Some(mnemonic), - Some(body.key_derivation_path_offset), - &body.validators, - &validator_dir, - &validator_store, - &spec, - )?; - Ok(api_types::GenericResponse::from(validators)) + if let Some(runtime) = runtime.upgrade() { + let mnemonic = + mnemonic_from_phrase(body.mnemonic.as_str()).map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "invalid mnemonic: {:?}", + e + )) + })?; + let (validators, _mnemonic) = runtime.block_on(create_validators( + Some(mnemonic), + Some(body.key_derivation_path_offset), + &body.validators, + &validator_dir, + &validator_store, + &spec, + ))?; + Ok(api_types::GenericResponse::from(validators)) + } else { + Err(warp_utils::reject::custom_server_error( + "Runtime shutdown".into(), + )) + } }) }, ); @@ -324,11 +349,13 @@ pub fn serve( .and(validator_dir_filter) .and(validator_store_filter.clone()) .and(signer.clone()) + .and(runtime_filter.clone()) .and_then( |body: api_types::KeystoreValidatorsPostRequest, validator_dir: PathBuf, validator_store: ValidatorStore, - signer| { + signer, + runtime: Weak| { blocking_signed_json_task(signer, move || { // Check to ensure the password is correct. let keypair = body @@ -357,18 +384,26 @@ pub fn serve( drop(validator_dir); let voting_password = body.password.clone(); - let validator_def = tokio::runtime::Handle::current() - .block_on(validator_store.add_validator_keystore( - voting_keystore_path, - voting_password, - body.enable, - )) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to initialize validator: {:?}", - e - )) - })?; + let validator_def = { + if let Some(runtime) = runtime.upgrade() { + runtime + .block_on(validator_store.add_validator_keystore( + voting_keystore_path, + voting_password, + body.enable, + )) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to initialize validator: {:?}", + e + )) + })? 
+ } else { + return Err(warp_utils::reject::custom_server_error( + "Runtime shutdown".into(), + )); + } + }; Ok(api_types::GenericResponse::from(api_types::ValidatorData { enabled: body.enable, @@ -387,11 +422,13 @@ pub fn serve( .and(warp::body::json()) .and(validator_store_filter) .and(signer) + .and(runtime_filter) .and_then( |validator_pubkey: PublicKey, body: api_types::ValidatorPatchRequest, validator_store: ValidatorStore, - signer| { + signer, + runtime: Weak| { blocking_signed_json_task(signer, move || { let initialized_validators_rw_lock = validator_store.initialized_validators(); let mut initialized_validators = initialized_validators_rw_lock.write(); @@ -403,19 +440,24 @@ pub fn serve( ))), Some(enabled) if enabled == body.enabled => Ok(()), Some(_) => { - tokio::runtime::Handle::current() - .block_on( - initialized_validators - .set_validator_status(&validator_pubkey, body.enabled), - ) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "unable to set validator status: {:?}", - e - )) - })?; - - Ok(()) + if let Some(runtime) = runtime.upgrade() { + runtime + .block_on( + initialized_validators + .set_validator_status(&validator_pubkey, body.enabled), + ) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "unable to set validator status: {:?}", + e + )) + })?; + Ok(()) + } else { + Err(warp_utils::reject::custom_server_error( + "Runtime shutdown".into(), + )) + } } } }) @@ -471,8 +513,8 @@ pub async fn blocking_signed_json_task( ) -> Result where S: Fn(&[u8]) -> String, - F: Fn() -> Result, - T: Serialize, + F: Fn() -> Result + Send + 'static, + T: Serialize + Send + 'static, { warp_utils::task::blocking_task(func) .await diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 594b82e2755..6df6bfc3edb 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -23,7 +23,9 @@ use std::marker::PhantomData; use std::net::Ipv4Addr; use std::sync::Arc; use tempfile::{tempdir, TempDir}; +use tokio::runtime::Runtime; use tokio::sync::oneshot; +use tokio_compat_02::FutureExt; const PASSWORD_BYTES: &[u8] = &[42, 50, 37]; @@ -37,8 +39,18 @@ struct ApiTester { _validator_dir: TempDir, } +// Builds a runtime to be used in the testing configuration. 
+fn build_runtime() -> Arc { + Arc::new( + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .expect("Should be able to build a testing runtime"), + ) +} + impl ApiTester { - pub async fn new() -> Self { + pub async fn new(runtime: std::sync::Weak) -> Self { let log = null_logger().unwrap(); let validator_dir = tempdir().unwrap(); @@ -80,6 +92,7 @@ impl ApiTester { let initialized_validators = validator_store.initialized_validators(); let context: Arc> = Arc::new(Context { + runtime, api_secret, validator_dir: Some(validator_dir.path().into()), validator_store: Some(validator_store), @@ -420,110 +433,145 @@ struct KeystoreValidatorScenario { correct_password: bool, } -#[tokio::test(core_threads = 2)] -async fn invalid_pubkey() { - ApiTester::new() - .await - .invalidate_api_token() - .test_get_lighthouse_version_invalid() - .await; +#[test] +fn invalid_pubkey() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on( + async { + ApiTester::new(weak_runtime) + .await + .invalidate_api_token() + .test_get_lighthouse_version_invalid() + .await; + } + .compat(), + ); } -#[tokio::test(core_threads = 2)] -async fn simple_getters() { - ApiTester::new() - .await - .test_get_lighthouse_version() - .await - .test_get_lighthouse_health() - .await - .test_get_lighthouse_spec() - .await; +#[test] +fn simple_getters() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on( + async { + ApiTester::new(weak_runtime) + .await + .test_get_lighthouse_version() + .await + .test_get_lighthouse_health() + .await + .test_get_lighthouse_spec() + .await; + } + .compat(), + ); } -#[tokio::test(core_threads = 2)] -async fn hd_validator_creation() { - ApiTester::new() - .await - .assert_enabled_validators_count(0) - .assert_validators_count(0) - .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: true, - key_derivation_path_offset: 0, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .create_hd_validators(HdValidatorScenario { - count: 1, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![0], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(3) - .create_hd_validators(HdValidatorScenario { - count: 0, - specify_mnemonic: true, - key_derivation_path_offset: 4, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(3); +#[test] +fn hd_validator_creation() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on( + async { + ApiTester::new(weak_runtime) + .await + .assert_enabled_validators_count(0) + .assert_validators_count(0) + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: true, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .create_hd_validators(HdValidatorScenario { + count: 1, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![0], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(3) + .create_hd_validators(HdValidatorScenario { + count: 0, + specify_mnemonic: true, + key_derivation_path_offset: 4, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(3); + } + .compat(), + ); } -#[tokio::test(core_threads = 2)] -async fn validator_enabling() { - ApiTester::new() - .await 
- .create_hd_validators(HdValidatorScenario { - count: 2, - specify_mnemonic: false, - key_derivation_path_offset: 0, - disabled: vec![], - }) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2) - .set_validator_enabled(0, false) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(2) - .set_validator_enabled(0, true) - .await - .assert_enabled_validators_count(2) - .assert_validators_count(2); +#[test] +fn validator_enabling() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on( + async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2); + } + .compat(), + ); } -#[tokio::test(core_threads = 2)] -async fn keystore_validator_creation() { - ApiTester::new() - .await - .assert_enabled_validators_count(0) - .assert_validators_count(0) - .create_keystore_validators(KeystoreValidatorScenario { - correct_password: true, - enabled: true, - }) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(1) - .create_keystore_validators(KeystoreValidatorScenario { - correct_password: false, - enabled: true, - }) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(1) - .create_keystore_validators(KeystoreValidatorScenario { - correct_password: true, - enabled: false, - }) - .await - .assert_enabled_validators_count(1) - .assert_validators_count(2); +#[test] +fn keystore_validator_creation() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on( + async { + ApiTester::new(weak_runtime) + .await + .assert_enabled_validators_count(0) + .assert_validators_count(0) + .create_keystore_validators(KeystoreValidatorScenario { + correct_password: true, + enabled: true, + }) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(1) + .create_keystore_validators(KeystoreValidatorScenario { + correct_password: false, + enabled: true, + }) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(1) + .create_keystore_validators(KeystoreValidatorScenario { + correct_password: true, + enabled: false, + }) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2); + } + .compat(), + ); } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index e495d7e492c..cbe42aaf455 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -38,7 +38,7 @@ use std::marker::PhantomData; use std::net::SocketAddr; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; -use tokio::time::{delay_for, Duration}; +use tokio::time::{sleep, Duration}; use types::{EthSpec, Hash256}; use validator_store::ValidatorStore; @@ -337,6 +337,7 @@ impl ProductionValidatorClient { self.http_api_listen_addr = if self.config.http_api.enabled { let ctx: Arc> = Arc::new(http_api::Context { + runtime: self.context.executor.runtime(), api_secret, validator_store: Some(self.validator_store.clone()), validator_dir: Some(self.config.validator_dir.clone()), @@ -415,7 +416,7 @@ async fn init_from_beacon_node( } } - delay_for(RETRY_DELAY).await; + 
sleep(RETRY_DELAY).await; }; Ok((genesis.genesis_time, genesis.genesis_validators_root)) @@ -447,7 +448,7 @@ async fn wait_for_genesis( // timer runs out. tokio::select! { result = poll_whilst_waiting_for_genesis(beacon_node, genesis_time, context.log()) => result?, - () = delay_for(genesis_time - now) => () + () = sleep(genesis_time - now) => () }; info!( @@ -497,7 +498,7 @@ async fn wait_for_connectivity( "Unable to connect to beacon node"; "error" => format!("{:?}", e), ); - delay_for(RETRY_DELAY).await; + sleep(RETRY_DELAY).await; } } } @@ -546,6 +547,6 @@ async fn poll_whilst_waiting_for_genesis( } } - delay_for(WAITING_FOR_GENESIS_POLL_TIME).await; + sleep(WAITING_FOR_GENESIS_POLL_TIME).await; } }
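
Note on the timer rename running through these hunks: tokio 0.3 renames the 0.2 timer functions `delay_for`/`delay_until` to `sleep`/`sleep_until`, which is why the changes in `eth1_test_rig`, the simulator checks and `validator_client/src/lib.rs` are mechanical one-line substitutions. A minimal standalone sketch of the new names follows; it is not taken from this diff and assumes a tokio 0.3 dependency with the `time`, `macros` and `rt-multi-thread` features enabled.

use std::time::Duration;
use tokio::time::{sleep, sleep_until, Instant};

#[tokio::main]
async fn main() {
    // tokio 0.2: tokio::time::delay_for(Duration::from_secs(1)).await;
    sleep(Duration::from_secs(1)).await;

    // tokio 0.2: tokio::time::delay_until(deadline).await;
    let deadline = Instant::now() + Duration::from_millis(500);
    sleep_until(deadline).await;
}

As with the 0.2 API, a deadline that has already passed resolves immediately, which is the behaviour the `delay_triggers_when_in_the_past` test continues to rely on.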
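
The simulator entry points and the validator HTTP API tests now build a tokio 0.3 runtime explicitly and wrap the top-level future with `tokio_compat_02::FutureExt::compat`, so dependencies still compiled against tokio 0.2 (hyper 0.13, warp, reqwest 0.10) find the 0.2 reactor context they expect. The sketch below shows that pattern in isolation; `build_runtime` mirrors the test helper added above, the inner future body is a placeholder, and the Cargo side assumes `tokio = "0.3"` with `rt-multi-thread` plus `tokio-compat-02 = "0.1"`, as in the Cargo.toml hunks.

use std::sync::Arc;
use tokio::runtime::Runtime;
use tokio_compat_02::FutureExt;

// Mirrors the `build_runtime` helper added to the validator HTTP API tests.
fn build_runtime() -> Arc<Runtime> {
    Arc::new(
        tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()
            .expect("should build a tokio 0.3 runtime"),
    )
}

fn main() {
    let runtime = build_runtime();
    // `.compat()` enters a tokio 0.2 context for the duration of the future, so 0.2-based
    // clients driven inside it keep working on the 0.3 runtime.
    runtime.block_on(
        async {
            // ... drive the simulation / test future here ...
        }
        .compat(),
    );
}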
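
The warp handlers in `validator_client/src/http_api` no longer reach for a global handle via `tokio::runtime::Handle::current()`; instead they receive a `Weak<Runtime>` and upgrade it before calling `block_on`, so a request that races with shutdown gets a clean "Runtime shutdown" error rather than a missing-runtime panic. The sketch below shows only the upgrade-or-fail shape; `do_async_work` and `run_if_runtime_alive` are hypothetical stand-ins for the keystore operations and are not part of the diff.

use std::sync::{Arc, Weak};
use tokio::runtime::Runtime;

// Hypothetical stand-in for an async operation such as `add_validator_keystore`.
async fn do_async_work() -> Result<u64, String> {
    Ok(42)
}

// Runs the async operation from blocking code, but only while the runtime is still alive.
fn run_if_runtime_alive(runtime: Weak<Runtime>) -> Result<u64, String> {
    if let Some(runtime) = runtime.upgrade() {
        // Called from a plain (non-async) thread here; calling `block_on` from inside the
        // runtime's own async worker threads would panic.
        runtime.block_on(do_async_work())
    } else {
        Err("Runtime shutdown".to_string())
    }
}

fn main() {
    let runtime = Arc::new(Runtime::new().expect("should build runtime"));
    let weak = Arc::downgrade(&runtime);

    assert_eq!(run_if_runtime_alive(weak.clone()), Ok(42));

    // Once the last strong reference is gone the handler reports an error
    // instead of panicking on a missing runtime.
    drop(runtime);
    assert_eq!(run_if_runtime_alive(weak), Err("Runtime shutdown".to_string()));
}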